ixgbe: Make allocating skb and placing data in it a separate function

This patch creates a function named ixgbe_fetch_rx_buffer. The sole
purpose of this function is to retrieve a single buffer off of the ring and
to place it in an skb.

The advantage to doing this is that it helps improve readability, since
I can decrease the indentation of the code in this section.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
commit 18806c9ea2
parent cf3fe7aca0
Author:    Alexander Duyck
Date:      2012-07-20 08:08:44 +00:00
Committer: Peter P Waskiewicz Jr
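
The shape of the refactor described in the commit message, before reading the
diff: the per-buffer work moves out of the Rx cleanup loop into a helper that
returns NULL on allocation failure, so the loop body shrinks to a descriptor
check plus a single call. Below is a compilable userspace toy of that
control-flow shape only; it is not the driver code, and every name in it
(fetch_rx_buffer, clean_rx_irq, struct rx_ring, and so on) is a stand-in.
DMA mapping, page reuse, and multi-descriptor packets are omitted.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the ring, descriptor, and skb types. */
struct rx_desc { int ready; size_t len; };
struct sk_buff { size_t len; };
struct rx_ring {
	struct rx_desc desc[8];
	unsigned int next_to_clean;
};

/* Analogue of ixgbe_fetch_rx_buffer(): pull one completed buffer off
 * the ring and wrap it in an skb; return NULL on allocation failure
 * so the caller can simply stop and retry on the next poll. */
static struct sk_buff *fetch_rx_buffer(struct rx_ring *ring,
				       struct rx_desc *desc)
{
	struct sk_buff *skb = malloc(sizeof(*skb));

	if (!skb)
		return NULL;
	skb->len = desc->len;
	return skb;
}

/* Analogue of ixgbe_clean_rx_irq(): the loop body stays flat because
 * all of the buffer handling lives in the helper. */
static void clean_rx_irq(struct rx_ring *ring, int budget)
{
	while (budget--) {
		struct rx_desc *desc = &ring->desc[ring->next_to_clean];
		struct sk_buff *skb;

		if (!desc->ready)
			break;			/* nothing left to clean */

		skb = fetch_rx_buffer(ring, desc);
		if (!skb)
			break;			/* allocation failed */

		printf("cleaned %zu-byte packet\n", skb->len);
		free(skb);
		desc->ready = 0;
		ring->next_to_clean = (ring->next_to_clean + 1) % 8;
	}
}

int main(void)
{
	struct rx_ring ring = { .desc = { { 1, 60 }, { 1, 1514 } } };

	clean_rx_irq(&ring, 4);
	return 0;
}

In the driver itself the helper also covers the paths visible in the hunks
below: the header copy into a freshly allocated skb, and the DMA unmap/sync
handling behind the dma_sync label.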

--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -1693,57 +1693,14 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	return true;
 }
 
-/**
- * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @q_vector: structure containing interrupt and ring information
- * @rx_ring: rx descriptor ring to transact packets on
- * @budget: Total limit on number of packets to process
- *
- * This function provides a "bounce buffer" approach to Rx interrupt
- * processing.  The advantage to this is that on systems that have
- * expensive overhead for IOMMU access this provides a means of avoiding
- * it by maintaining the mapping of the page to the syste.
- *
- * Returns true if all work is completed without reaching budget
- **/
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
-			       struct ixgbe_ring *rx_ring,
-			       int budget)
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+					     union ixgbe_adv_rx_desc *rx_desc)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-#ifdef IXGBE_FCOE
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int ddp_bytes = 0;
-#endif /* IXGBE_FCOE */
-	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-
-	do {
-		struct ixgbe_rx_buffer *rx_buffer;
-		union ixgbe_adv_rx_desc *rx_desc;
-		struct sk_buff *skb;
-		struct page *page;
-		u16 ntc;
-
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
-			cleaned_count = 0;
-		}
-
-		ntc = rx_ring->next_to_clean;
-		rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
-		rx_buffer = &rx_ring->rx_buffer_info[ntc];
-
-		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
-			break;
-
-		/*
-		 * This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * RXD_STAT_DD bit is set
-		 */
-		rmb();
+	struct ixgbe_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
 
-		page = rx_buffer->page;
-		prefetchw(page);
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
 
@@ -1764,7 +1721,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
						IXGBE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_rx_buff_failed++;
-			break;
+			return NULL;
 		}
 
 		/*
@@ -1780,8 +1737,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 * after the writeback.  Only unmap it when EOP is
 		 * reached
 		 */
-		if (likely(ixgbe_test_staterr(rx_desc,
-					      IXGBE_RXD_STAT_EOP)))
+		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
 			goto dma_sync;
 
 		IXGBE_CB(skb)->dma = rx_buffer->dma;
@@ -1817,6 +1773,62 @@ dma_sync:
 	rx_buffer->dma = 0;
 	rx_buffer->page = NULL;
 
+	return skb;
+}
+
+/**
+ * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the syste.
+ *
+ * Returns true if all work is completed without reaching budget
+ **/
+static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+			       struct ixgbe_ring *rx_ring,
+			       int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
+	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+
+	do {
+		union ixgbe_adv_rx_desc *rx_desc;
+		struct sk_buff *skb;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+			break;
+
+		/*
+		 * This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * RXD_STAT_DD bit is set
+		 */
+		rmb();
+
+		/* retrieve a buffer from the ring */
+		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb)
+			break;
+
 		ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
 
 		cleaned_count++;