Merge tag 'ntb-4.3' of git://github.com/jonmason/ntb

Pull NTB fixes from Jon Mason:
 "NTB bug and documentation fixes, new device IDs, performance
  improvements, and adding a mailing list to MAINTAINERS for NTB"

* tag 'ntb-4.3' of git://github.com/jonmason/ntb:
  NTB: Fix range check on memory window index
  NTB: Improve index handling in B2B MW workaround
  NTB: Fix documentation for ntb_peer_db_clear.
  NTB: Fix documentation for ntb_link_is_up
  NTB: Use unique DMA channels for TX and RX
  NTB: Remove dma_sync_wait from ntb_async_rx
  NTB: Clean up QP stats info
  NTB: Make the transport list in order of discovery
  NTB: Add PCI Device IDs for Broadwell Xeon
  NTB: Add flow control to the ntb_netdev
  NTB: Add list to MAINTAINERS

Tag message (ntb-4.3):

NTB bug and documentation fixes, new device IDs, performance
improvements, and adding a mailing list to MAINTAINERS for NTB.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABCAAGBQJV84vnAAoJEG5mS6x6i9IjbNkQAJsaG804NrU1hu4lpXX0nC2d
LlmN6o9HveAJLZf0hX7WE5cjd0jkgzYevgXoFDOnTs9OpMATeQJ65pq/6fUWzrjd
P/wnZFeJoGP/SWAm1M6r4mhOJe9L9R574FNKypopmm1yWuijDvqtAskhJX8wR++7
BWBRlL7lryCsvaNrZBMKWBWBih3TSAD1g2l4F/1TxSU25aBz3LU2u5fCNvtDbhJH
FY9DKnymI91c4Do24iV8uTzGKUX+gXe0COsN5E57zCn2yYRKflz7H2reLXLNWXSv
X2sNHscnix26GqyqAxvFCz0Lja0cLr1nT5LxcaZmLWgxY5Y52VcT14TKUu36i1L9
Ppt52QA56ZQaxQ1luLxBTfEuw/QdAVz1JKPf8aqKqzVKrOAcFp4566tmsfCmulq7
GhLZVzb80c5BEPNwOdJhRVxfcPQ/eSe4CMRa1gkyM4+vLbFf3bDpsZIUO/Bx7ug8
zyg0hBYNIKvhSMXb3vZs30xz2pVI13yxNnURXtP4P4Ifig8WYkcvxqYPvsPFjLmd
yYm89ulBjhubTCNu3+40Jd+QWqnOE3egFobUis2BNOu5adZV7bQwu79DGYx3zysK
OqPtS1qOpXR1ZA+AaRODk115ZFxa/Ebos6Lr0HnaZYwRAAF9HWPiS4ilLrH+Ls8R
JN229+2kgvred9kBBwYB
=CASz
-----END PGP SIGNATURE-----
commit ded0e250b5
7 changed files with 210 additions and 47 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -7396,6 +7396,7 @@ NTB DRIVER CORE
 M:	Jon Mason <jdmason@kudzu.us>
 M:	Dave Jiang <dave.jiang@intel.com>
 M:	Allen Hubbe <Allen.Hubbe@emc.com>
+L:	linux-ntb@googlegroups.com
 S:	Supported
 W:	https://github.com/jonmason/ntb/wiki
 T:	git git://github.com/jonmason/ntb.git
@@ -7407,6 +7408,7 @@ F:	include/linux/ntb_transport.h
 NTB INTEL DRIVER
 M:	Jon Mason <jdmason@kudzu.us>
 M:	Dave Jiang <dave.jiang@intel.com>
+L:	linux-ntb@googlegroups.com
 S:	Supported
 W:	https://github.com/jonmason/ntb/wiki
 T:	git git://github.com/jonmason/ntb.git
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
@@ -61,11 +61,21 @@ MODULE_VERSION(NTB_NETDEV_VER);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
+/* Time in usecs for tx resource reaper */
+static unsigned int tx_time = 1;
+
+/* Number of descriptors to free before resuming tx */
+static unsigned int tx_start = 10;
+
+/* Number of descriptors still available before stop upper layer tx */
+static unsigned int tx_stop = 5;
+
 struct ntb_netdev {
 	struct list_head list;
 	struct pci_dev *pdev;
 	struct net_device *ndev;
 	struct ntb_transport_qp *qp;
+	struct timer_list tx_timer;
 };
 
 #define NTB_TX_TIMEOUT_MS 1000
@@ -136,11 +146,42 @@ enqueue_again:
 	}
 }
 
+static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
+				      struct ntb_transport_qp *qp, int size)
+{
+	struct ntb_netdev *dev = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
+	 * since the queue was last started.
+	 */
+	smp_mb();
+
+	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
+		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
+		return -EBUSY;
+	}
+
+	netif_start_queue(netdev);
+	return 0;
+}
+
+static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
+				    struct ntb_transport_qp *qp, int size)
+{
+	if (netif_queue_stopped(ndev) ||
+	    (ntb_transport_tx_free_entry(qp) >= size))
+		return 0;
+
+	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
+}
+
 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
 				  void *data, int len)
 {
 	struct net_device *ndev = qp_data;
 	struct sk_buff *skb;
+	struct ntb_netdev *dev = netdev_priv(ndev);
 
 	skb = data;
 	if (!skb || !ndev)
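The stop/wake pair above is the classic two-watermark flow-control scheme: transmit stops once fewer than tx_stop descriptors remain and only restarts after tx_start are free again, so the queue does not thrash at the boundary, and the smp_mb() pairs the stop path's re-check against the completion path's update of the free count. A minimal single-threaded userspace sketch of just the watermark logic (all names here are illustrative, not from the patch; the memory barriers are omitted since there is only one thread):

	#include <stdbool.h>
	#include <stdio.h>

	#define TX_STOP  5		/* stop when free entries drop below this */
	#define TX_START 10		/* wake only once this many are free again */

	static bool stopped;
	static unsigned int free_entries = 8;

	static void xmit_one(void)
	{
		if (free_entries < TX_STOP) {	/* mirrors __ntb_netdev_maybe_stop_tx() */
			stopped = true;
			printf("queue stopped at %u free\n", free_entries);
			return;
		}
		free_entries--;			/* descriptor handed to the transport */
	}

	static void complete_one(void)
	{
		free_entries++;			/* descriptor recycled on completion */
		if (stopped && free_entries >= TX_START) {	/* mirrors the wake in tx_handler */
			stopped = false;
			printf("queue woken at %u free\n", free_entries);
		}
	}

	int main(void)
	{
		while (!stopped)		/* drain until the low watermark trips */
			xmit_one();
		while (stopped)			/* complete until the high watermark wakes us */
			complete_one();
		return 0;
	}

The gap between the two thresholds is the design point: waking as soon as one descriptor frees up would stop again almost immediately under load.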
@@ -155,6 +196,15 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
 	}
 
 	dev_kfree_skb(skb);
+
+	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+		/* Make sure anybody stopping the queue after this sees the new
+		 * value of ntb_transport_tx_free_entry()
+		 */
+		smp_mb();
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
 }
 
 static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
@@ -163,10 +213,15 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
 	struct ntb_netdev *dev = netdev_priv(ndev);
 	int rc;
 
+	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
 	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
 	if (rc)
 		goto err;
 
+	/* check for next submit */
+	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
+
 	return NETDEV_TX_OK;
 
 err:
@@ -175,6 +230,23 @@ err:
 	return NETDEV_TX_BUSY;
 }
 
+static void ntb_netdev_tx_timer(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct ntb_netdev *dev = netdev_priv(ndev);
+
+	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
+		mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+	} else {
+		/* Make sure anybody stopping the queue after this sees the new
+		 * value of ntb_transport_tx_free_entry()
+		 */
+		smp_mb();
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
+}
+
 static int ntb_netdev_open(struct net_device *ndev)
 {
 	struct ntb_netdev *dev = netdev_priv(ndev);
@@ -197,8 +269,11 @@ static int ntb_netdev_open(struct net_device *ndev)
 		}
 	}
 
+	setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);
+
 	netif_carrier_off(ndev);
 	ntb_transport_link_up(dev->qp);
+	netif_start_queue(ndev);
 
 	return 0;
 
@@ -219,6 +294,8 @@ static int ntb_netdev_close(struct net_device *ndev)
 	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
 		dev_kfree_skb(skb);
 
+	del_timer_sync(&dev->tx_timer);
+
 	return 0;
 }
 
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -190,14 +190,17 @@ static inline int pdev_is_xeon(struct pci_dev *pdev)
 	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		return 1;
 	}
 	return 0;
@@ -237,7 +240,7 @@ static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
 
 static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
 {
-	if (idx < 0 || idx > ndev->mw_count)
+	if (idx < 0 || idx >= ndev->mw_count)
 		return -EINVAL;
 	return ndev->reg->mw_bar[idx];
 }
@@ -572,10 +575,13 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
 			 "Connection Topology -\t%s\n",
 			 ntb_topo_string(ndev->ntb.topo));
 
-	off += scnprintf(buf + off, buf_size - off,
-			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
-	off += scnprintf(buf + off, buf_size - off,
-			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
+	if (ndev->b2b_idx != UINT_MAX) {
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
+		off += scnprintf(buf + off, buf_size - off,
+				 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
+	}
 
 	off += scnprintf(buf + off, buf_size - off,
 			 "BAR4 Split -\t\t%s\n",
 			 ndev->bar4_split ? "yes" : "no");
@@ -1484,7 +1490,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
 	pdev = ndev_pdev(ndev);
 	mmio = ndev->self_mmio;
 
-	if (ndev->b2b_idx >= ndev->mw_count) {
+	if (ndev->b2b_idx == UINT_MAX) {
 		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
 		b2b_bar = 0;
 		ndev->b2b_off = 0;
@@ -1776,6 +1782,13 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev)
 	else
 		ndev->b2b_idx = b2b_mw_idx;
 
+	if (ndev->b2b_idx >= ndev->mw_count) {
+		dev_dbg(ndev_dev(ndev),
+			"b2b_mw_idx %d invalid for mw_count %u\n",
+			b2b_mw_idx, ndev->mw_count);
+		return -EINVAL;
+	}
+
 	dev_dbg(ndev_dev(ndev),
 		"setting up b2b mw idx %d means %d\n",
 		b2b_mw_idx, ndev->b2b_idx);
@@ -1843,6 +1856,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
 		break;
 	}
@@ -1857,6 +1873,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
 		break;
 	}
@@ -1878,6 +1897,9 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev)
 	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
 	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
+	case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
 		ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
 		break;
 	}
@@ -1996,7 +2018,7 @@ static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
 	ndev->ntb.ops = &intel_ntb_ops;
 
 	ndev->b2b_off = 0;
-	ndev->b2b_idx = INT_MAX;
+	ndev->b2b_idx = UINT_MAX;
 
 	ndev->bar4_split = 0;
 
@@ -2234,14 +2256,17 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
 	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -67,6 +67,9 @@
 #define PCI_DEVICE_ID_INTEL_NTB_PS_HSX	0x2F0E
 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX	0x2F0F
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD	0x0C4E
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX	0x6F0D
+#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX	0x6F0E
+#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX	0x6F0F
 
 /* Intel Xeon hardware */
 
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
@@ -119,7 +119,8 @@ struct ntb_transport_qp {
 	struct ntb_transport_ctx *transport;
 	struct ntb_dev *ndev;
 	void *cb_data;
-	struct dma_chan *dma_chan;
+	struct dma_chan *tx_dma_chan;
+	struct dma_chan *rx_dma_chan;
 
 	bool client_ready;
 	bool link_is_up;
@@ -297,7 +298,7 @@ static LIST_HEAD(ntb_transport_list);
 
 static int ntb_bus_init(struct ntb_transport_ctx *nt)
 {
-	list_add(&nt->entry, &ntb_transport_list);
+	list_add_tail(&nt->entry, &ntb_transport_list);
 	return 0;
 }
 
@@ -452,7 +453,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "NTB QP stats\n");
+			       "\nNTB QP stats:\n\n");
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -470,11 +471,11 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "rx_buff - \t%p\n", qp->rx_buff);
+			       "rx_buff - \t0x%p\n", qp->rx_buff);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_index - \t%u\n", qp->rx_index);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
+			       "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -489,15 +490,32 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "tx_mw - \t%p\n", qp->tx_mw);
+			       "tx_mw - \t0x%p\n", qp->tx_mw);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "tx_index - \t%u\n", qp->tx_index);
+			       "tx_index (H) - \t%u\n", qp->tx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "RRI (T) - \t%u\n",
+			       qp->remote_rx_info->entry);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "free tx - \t%u\n",
+			       ntb_transport_tx_free_entry(qp));
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "\nQP Link %s\n",
+			       "\n");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using TX DMA - \t%s\n",
+			       qp->tx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "Using RX DMA - \t%s\n",
+			       qp->rx_dma_chan ? "Yes" : "No");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "QP Link - \t%s\n",
 			       qp->link_is_up ? "Up" : "Down");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\n");
 
 	if (out_offset > out_count)
 		out_offset = out_count;
@@ -535,6 +553,7 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
 	}
 	entry = list_first_entry(list, struct ntb_queue_entry, entry);
 	list_del(&entry->entry);
+
 out:
 	spin_unlock_irqrestore(lock, flags);
 
@@ -1206,7 +1225,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
-	struct dma_chan *chan = qp->dma_chan;
+	struct dma_chan *chan = qp->rx_dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
@@ -1219,18 +1238,18 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 		goto err;
 
 	if (len < copy_bytes)
-		goto err_wait;
+		goto err;
 
 	device = chan->device;
 	pay_off = (size_t)offset & ~PAGE_MASK;
 	buff_off = (size_t)buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err_wait;
+		goto err;
 
 	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 	if (!unmap)
-		goto err_wait;
+		goto err;
 
 	unmap->len = len;
 	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
@@ -1273,12 +1292,6 @@ err_set_unmap:
 	dmaengine_unmap_put(unmap);
 err_get_unmap:
 	dmaengine_unmap_put(unmap);
-err_wait:
-	/* If the callbacks come out of order, the writing of the index to the
-	 * last completed will be out of order. This may result in the
-	 * receive stalling forever.
-	 */
-	dma_sync_wait(chan, qp->last_cookie);
 err:
 	ntb_memcpy_rx(entry, offset);
 	qp->rx_memcpy++;
@@ -1373,8 +1386,8 @@ static void ntb_transport_rxc_db(unsigned long data)
 			break;
 	}
 
-	if (i && qp->dma_chan)
-		dma_async_issue_pending(qp->dma_chan);
+	if (i && qp->rx_dma_chan)
+		dma_async_issue_pending(qp->rx_dma_chan);
 
 	if (i == qp->rx_max_entry) {
 		/* there is more work to do */
@@ -1441,7 +1454,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 {
 	struct ntb_payload_header __iomem *hdr;
 	struct dma_async_tx_descriptor *txd;
-	struct dma_chan *chan = qp->dma_chan;
+	struct dma_chan *chan = qp->tx_dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
 	struct dmaengine_unmap_data *unmap;
@@ -1634,14 +1647,27 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	dma_cap_set(DMA_MEMCPY, dma_mask);
 
 	if (use_dma) {
-		qp->dma_chan = dma_request_channel(dma_mask, ntb_dma_filter_fn,
-						   (void *)(unsigned long)node);
-		if (!qp->dma_chan)
-			dev_info(&pdev->dev, "Unable to allocate DMA channel\n");
+		qp->tx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->tx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
+
+		qp->rx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->rx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
 	} else {
-		qp->dma_chan = NULL;
+		qp->tx_dma_chan = NULL;
+		qp->rx_dma_chan = NULL;
 	}
-	dev_dbg(&pdev->dev, "Using %s memcpy\n", qp->dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
+		qp->tx_dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
+		qp->rx_dma_chan ? "DMA" : "CPU");
 
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
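The point of requesting the channel twice is that each dma_request_channel() call may hand back a different engine, so TX and RX copies no longer serialize behind one channel. A hedged kernel-side fragment showing just the dmaengine client calls involved (names are illustrative, the NULL filter simply accepts any DMA_MEMCPY-capable channel, and error handling is trimmed; this is not code from the patch):

	#include <linux/dmaengine.h>

	static struct dma_chan *tx_chan, *rx_chan;

	static int request_two_memcpy_channels(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* Two independent grabs; on multi-engine hardware these can
		 * land on different engines and run in parallel.
		 */
		tx_chan = dma_request_channel(mask, NULL, NULL);
		rx_chan = dma_request_channel(mask, NULL, NULL);

		/* Either side falls back to CPU memcpy if no channel is free */
		return (tx_chan || rx_chan) ? 0 : -ENODEV;
	}

	static void release_channels(void)
	{
		if (tx_chan)
			dma_release_channel(tx_chan);
		if (rx_chan)
			dma_release_channel(rx_chan);
	}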
@@ -1676,8 +1702,10 @@ err2:
 err1:
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
-	if (qp->dma_chan)
-		dma_release_channel(qp->dma_chan);
+	if (qp->tx_dma_chan)
+		dma_release_channel(qp->tx_dma_chan);
+	if (qp->rx_dma_chan)
+		dma_release_channel(qp->rx_dma_chan);
 	nt->qp_bitmap_free |= qp_bit;
 err:
 	return NULL;
@@ -1701,12 +1729,27 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 
 	pdev = qp->ndev->pdev;
 
-	if (qp->dma_chan) {
-		struct dma_chan *chan = qp->dma_chan;
+	if (qp->tx_dma_chan) {
+		struct dma_chan *chan = qp->tx_dma_chan;
 		/* Putting the dma_chan to NULL will force any new traffic to be
 		 * processed by the CPU instead of the DMA engine
 		 */
-		qp->dma_chan = NULL;
+		qp->tx_dma_chan = NULL;
+
+		/* Try to be nice and wait for any queued DMA engine
+		 * transactions to process before smashing it with a rock
+		 */
+		dma_sync_wait(chan, qp->last_cookie);
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+
+	if (qp->rx_dma_chan) {
+		struct dma_chan *chan = qp->rx_dma_chan;
+		/* Putting the dma_chan to NULL will force any new traffic to be
+		 * processed by the CPU instead of the DMA engine
+		 */
+		qp->rx_dma_chan = NULL;
 
 		/* Try to be nice and wait for any queued DMA engine
 		 * transactions to process before smashing it with a rock
@@ -1843,7 +1886,7 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
 	if (!entry) {
 		qp->tx_err_no_buf++;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 
 	entry->cb_data = cb;
@@ -1954,21 +1997,34 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
 	unsigned int max;
+	unsigned int copy_align;
 
 	if (!qp)
 		return 0;
 
-	if (!qp->dma_chan)
+	if (!qp->tx_dma_chan && !qp->rx_dma_chan)
 		return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 
+	copy_align = max(qp->tx_dma_chan->device->copy_align,
+			 qp->rx_dma_chan->device->copy_align);
+
 	/* If DMA engine usage is possible, try to find the max size for that */
 	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
-	max -= max % (1 << qp->dma_chan->device->copy_align);
+	max -= max % (1 << copy_align);
 
 	return max;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
 
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+{
+	unsigned int head = qp->tx_index;
+	unsigned int tail = qp->remote_rx_info->entry;
+
+	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+
 static void ntb_transport_doorbell_callback(void *data, int vector)
 {
 	struct ntb_transport_ctx *nt = data;
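The new ntb_transport_tx_free_entry() treats tx_index as the ring head and the peer-reported remote_rx_info->entry as the tail. A minimal userspace check of that arithmetic, together with the copy_align rounding done by ntb_transport_max_size() (the ring size, frame size, header size, and alignment below are assumed values for illustration, not taken from the driver):

	#include <assert.h>
	#include <stdio.h>

	static unsigned int free_entries(unsigned int head, unsigned int tail,
					 unsigned int max_entry)
	{
		/* head: next entry the local side will fill (qp->tx_index);
		 * tail: consumer index reported back by the peer
		 * (qp->remote_rx_info->entry); add max_entry to unwrap when
		 * the producer index has wrapped around past the consumer.
		 */
		return tail > head ? tail - head : max_entry + tail - head;
	}

	int main(void)
	{
		/* 64-entry ring: producer at 10, peer has consumed through 14 */
		assert(free_entries(10, 14, 64) == 4);
		/* wrapped case: producer at 60, peer at 2 */
		assert(free_entries(60, 2, 64) == 6);

		/* Alignment step from ntb_transport_max_size(): dmaengine's
		 * copy_align is a log2 value, so the payload is rounded down
		 * to a multiple of 1 << copy_align.
		 */
		unsigned int max = 65536 - 24;		/* frame minus header, assumed */
		unsigned int copy_align = 6;		/* 64-byte alignment, assumed */
		max -= max % (1u << copy_align);
		printf("aligned max payload: %u\n", max);	/* prints 65472 */
		return 0;
	}

Taking the larger of the two channels' copy_align values keeps a single frame size valid for both the TX and RX engines.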
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
@@ -522,10 +522,9 @@ static inline int ntb_mw_clear_trans(struct ntb_dev *ntb, int idx)
  * @speed: OUT - The link speed expressed as PCIe generation number.
  * @width: OUT - The link width expressed as the number of PCIe lanes.
  *
- * Set the translation of a memory window.  The peer may access local memory
- * through the window starting at the address, up to the size.  The address
- * must be aligned to the alignment specified by ntb_mw_get_range().  The size
- * must be aligned to the size alignment specified by ntb_mw_get_range().
+ * Get the current state of the ntb link.  It is recommended to query the link
+ * state once after every link event.  It is safe to query the link state in
+ * the context of the link event callback.
  *
  * Return: One if the link is up, zero if the link is down, otherwise a
  * negative value indicating the error number.
@@ -795,7 +794,7 @@ static inline int ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
 }
 
 /**
- * ntb_peer_db_clear() - clear bits in the local doorbell register
+ * ntb_peer_db_clear() - clear bits in the peer doorbell register
  * @ntb: NTB device context.
  * @db_bits: Doorbell bits to clear.
  *
diff --git a/include/linux/ntb_transport.h b/include/linux/ntb_transport.h
@@ -83,3 +83,4 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
 void ntb_transport_link_up(struct ntb_transport_qp *qp);
 void ntb_transport_link_down(struct ntb_transport_qp *qp);
 bool ntb_transport_link_query(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp);