ixgbevf: implement CONFIG_NET_RX_BUSY_POLL
This patch enables CONFIG_NET_RX_BUSY_POLL support in the VF code. Sockets that have enabled the SO_BUSY_POLL socket option can then use the ndo_busy_poll operation (implemented here by ixgbevf_busy_poll_recv), which can result in lower latency at the cost of higher CPU utilization and increased power usage. This support works the same way as in the ixgbe driver.

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 08e50a20ed
commit c777cdfa4e
2 changed files with 177 additions and 0 deletions
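For context, busy polling is opt-in per socket: an application sets the SO_BUSY_POLL option (the net.core.busy_read and net.core.busy_poll sysctls provide system-wide defaults), and a blocking read may then spin in the driver's ndo_busy_poll hook for a bounded time before sleeping. A minimal userspace sketch, not part of this patch — the 50 us budget is an arbitrary example, and older kernels require CAP_NET_ADMIN to set the option:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_BUSY_POLL
#define SO_BUSY_POLL 46		/* value from asm-generic/socket.h */
#endif

int main(void)
{
	int busy_usecs = 50;	/* example spin budget, in microseconds */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Opt this socket in to busy polling: blocking reads may now
	 * spin in the NIC driver's ndo_busy_poll hook (here,
	 * ixgbevf_busy_poll_recv) before falling back to sleeping. */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
		       &busy_usecs, sizeof(busy_usecs)) < 0)
		perror("setsockopt(SO_BUSY_POLL)");

	return 0;
}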
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h

@@ -38,6 +38,10 @@
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -145,7 +149,112 @@ struct ixgbevf_q_vector {
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
 	char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE		0
+#define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+	spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+		rc = false;
+	} else {
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBEVF_QV_STATE_NAPI;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+				   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+		rc = false;
+	} else {
+		/* preserve yield marks */
+		q_vector->state |= IXGBEVF_QV_STATE_POLL;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+	return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_OWNED)
+		rc = false;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
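The helpers above form a small yield-lock: the NAPI poll routine and a busy-polling socket race for ownership of a q_vector, and whichever side loses sets a *_YIELD bit and backs off instead of spinning on the lock, so the owner learns at unlock time that more work is pending. A userspace analogue of the idea, with a pthread mutex standing in for the spinlock — an illustrative sketch only, these names are invented, not kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QV_IDLE        0
#define QV_NAPI        1	/* NAPI context owns the queue vector */
#define QV_POLL        2	/* busy-polling socket owns it */
#define QV_NAPI_YIELD  8	/* NAPI lost a race and backed off */
#define QV_POLL_YIELD 16	/* a poller lost a race and backed off */
#define QV_LOCKED     (QV_NAPI | QV_POLL)

static unsigned int state = QV_IDLE;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool qv_lock_napi(void)
{
	bool rc = true;

	pthread_mutex_lock(&lock);
	if (state & QV_LOCKED) {
		state |= QV_NAPI_YIELD;	/* leave a mark instead of waiting */
		rc = false;
	} else {
		state = QV_NAPI;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

static bool qv_lock_poll(void)
{
	bool rc = true;

	pthread_mutex_lock(&lock);
	if (state & QV_LOCKED) {
		state |= QV_POLL_YIELD;
		rc = false;
	} else {
		state |= QV_POLL;	/* |= so earlier yield marks survive */
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

/* returns true if a poller tried to get in while NAPI held the lock */
static bool qv_unlock_napi(void)
{
	bool rc;

	pthread_mutex_lock(&lock);
	rc = state & QV_POLL_YIELD;
	state = QV_IDLE;	/* the driver also preserves the DISABLED bit */
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	printf("napi got lock: %d\n", qv_lock_napi());	/* 1 */
	printf("poll got lock: %d\n", qv_lock_poll());	/* 0: NAPI owns it */
	printf("poll pending:  %d\n", qv_unlock_napi());	/* 1: rx work waits */
	return 0;
}

Note the asymmetry, which mirrors the driver: ixgbevf_qv_lock_napi assigns the state outright ("we don't care if someone yielded"), while ixgbevf_qv_lock_poll ORs its bit in so NAPI's yield marks are preserved.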
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

@@ -310,6 +310,16 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
 			   struct sk_buff *skb, u8 status,
 			   union ixgbe_adv_rx_desc *rx_desc)
 {
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	skb_mark_napi_id(skb, &q_vector->napi);
+
+	if (ixgbevf_qv_busy_polling(q_vector)) {
+		netif_receive_skb(skb);
+		/* exit early if we busy polled */
+		return;
+	}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
 }
 
@@ -563,6 +573,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	ixgbevf_for_each_ring(ring, q_vector->tx)
 		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	if (!ixgbevf_qv_lock_napi(q_vector))
+		return budget;
+#endif
+
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	if (q_vector->rx.count > 1)
@@ -577,6 +592,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 					 < per_ring_budget);
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
@@ -611,6 +630,34 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+	struct ixgbevf_q_vector *q_vector =
+			container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *ring;
+	int found = 0;
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		return LL_FLUSH_FAILED;
+
+	if (!ixgbevf_qv_lock_poll(q_vector))
+		return LL_FLUSH_BUSY;
+
+	ixgbevf_for_each_ring(ring, q_vector->rx) {
+		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+		if (found)
+			break;
+	}
+
+	ixgbevf_qv_unlock_poll(q_vector);
+
+	return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
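On the return convention used above: LL_FLUSH_FAILED tells the core busy-poll loop the device is going down (give up), LL_FLUSH_BUSY means NAPI currently owns the ring (spin and retry), and a positive value is the number of packets cleaned. A self-contained toy of that caller-side control flow, assuming the semantics of this era's sk_busy_loop(); the fake_busy_poll() stand-in and the iteration budget are invented for illustration:

#include <stdio.h>

#define LL_FLUSH_FAILED	(-1)	/* device going down: stop busy polling */
#define LL_FLUSH_BUSY	(-2)	/* NAPI owns the ring: retry */

/* Stand-in for ndo_busy_poll (ixgbevf_busy_poll_recv): pretend NAPI
 * holds the ring for two attempts, then three packets are cleaned. */
static int fake_busy_poll(void)
{
	static int calls;

	return ++calls < 3 ? LL_FLUSH_BUSY : 3;
}

int main(void)
{
	int budget = 16;	/* stand-in for the microsecond time budget */
	int rc = 0;

	while (budget--) {
		rc = fake_busy_poll();
		if (rc == LL_FLUSH_FAILED)
			break;	/* permanent failure */
		if (rc > 0)
			break;	/* the real loop exits once the socket's
				 * receive queue is non-empty */
		/* LL_FLUSH_BUSY: spin and try again */
	}
	printf("busy poll cleaned %d packets\n", rc > 0 ? rc : 0);
	return 0;
}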
@@ -1297,6 +1344,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
 		napi_enable(&q_vector->napi);
 	}
 }
@@ -1310,6 +1360,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
 		napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			usleep_range(1000, 20000);
+		}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	}
 }
@@ -1960,6 +2016,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_add(&q_vector->napi);
+#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
@@ -1969,6 +2028,9 @@ err_out:
 	while (q_idx) {
 		q_idx--;
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 		adapter->q_vector[q_idx] = NULL;
@@ -1992,6 +2054,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
@@ -3323,6 +3388,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_tx_timeout		= ixgbevf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)