xen-netback: notify immediately after pushing Tx response.
This fixes a performance regression introduced by commit
7fbb9d8415 ("xen-netback: release pending index before pushing
Tx responses").
Moving the notify outside of the spin locks means it can be delayed for
a long time (e.g. if the dealloc thread is descheduled, or if an
interrupt or softirq intervenes).
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Zoltan Kiss <zoltan.kiss@linaro.org>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b1cb59cf2e
commit
c8a4d29988
1 changed files with 12 additions and 11 deletions
|
@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
|
||||||
static void make_tx_response(struct xenvif_queue *queue,
|
static void make_tx_response(struct xenvif_queue *queue,
|
||||||
struct xen_netif_tx_request *txp,
|
struct xen_netif_tx_request *txp,
|
||||||
s8 st);
|
s8 st);
|
||||||
|
static void push_tx_responses(struct xenvif_queue *queue);
|
||||||
|
|
||||||
static inline int tx_work_todo(struct xenvif_queue *queue);
|
static inline int tx_work_todo(struct xenvif_queue *queue);
|
||||||
|
|
||||||
|
@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
int notify;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&queue->response_lock, flags);
|
spin_lock_irqsave(&queue->response_lock, flags);
|
||||||
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
|
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
|
||||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
|
push_tx_responses(queue);
|
||||||
spin_unlock_irqrestore(&queue->response_lock, flags);
|
spin_unlock_irqrestore(&queue->response_lock, flags);
|
||||||
if (notify)
|
|
||||||
notify_remote_via_irq(queue->tx_irq);
|
|
||||||
|
|
||||||
if (cons == end)
|
if (cons == end)
|
||||||
break;
|
break;
|
||||||
txp = RING_GET_REQUEST(&queue->tx, cons++);
|
txp = RING_GET_REQUEST(&queue->tx, cons++);
|
||||||
|
@ -1657,7 +1653,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
|
||||||
{
|
{
|
||||||
struct pending_tx_info *pending_tx_info;
|
struct pending_tx_info *pending_tx_info;
|
||||||
pending_ring_idx_t index;
|
pending_ring_idx_t index;
|
||||||
int notify;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
pending_tx_info = &queue->pending_tx_info[pending_idx];
|
pending_tx_info = &queue->pending_tx_info[pending_idx];
|
||||||
|
@ -1673,12 +1668,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
|
||||||
index = pending_index(queue->pending_prod++);
|
index = pending_index(queue->pending_prod++);
|
||||||
queue->pending_ring[index] = pending_idx;
|
queue->pending_ring[index] = pending_idx;
|
||||||
|
|
||||||
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
|
push_tx_responses(queue);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&queue->response_lock, flags);
|
spin_unlock_irqrestore(&queue->response_lock, flags);
|
||||||
|
|
||||||
if (notify)
|
|
||||||
notify_remote_via_irq(queue->tx_irq);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1699,6 +1691,15 @@ static void make_tx_response(struct xenvif_queue *queue,
|
||||||
queue->tx.rsp_prod_pvt = ++i;
|
queue->tx.rsp_prod_pvt = ++i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void push_tx_responses(struct xenvif_queue *queue)
|
||||||
|
{
|
||||||
|
int notify;
|
||||||
|
|
||||||
|
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
|
||||||
|
if (notify)
|
||||||
|
notify_remote_via_irq(queue->tx_irq);
|
||||||
|
}
|
||||||
|
|
||||||
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
|
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
|
||||||
u16 id,
|
u16 id,
|
||||||
s8 st,
|
s8 st,
|
||||||
|
|
Loading…
Add table
Reference in a new issue