Merge branch 'xen-netfront-next'
David Vrabel says:

====================
xen-netfront: refactor making Tx requests

As netfront has evolved to handle different sorts of skbs, the code to
fill a Tx request has been copied and pasted several times. This series
refactors it and a few other areas.

The first patch is to a Xen header, but it can be merged via net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6a38cc2be6

2 changed files with 83 additions and 124 deletions
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -421,109 +421,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	xennet_maybe_wake_tx(queue);
 }
 
-static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
-			      struct xen_netif_tx_request *tx)
+static struct xen_netif_tx_request *xennet_make_one_txreq(
+	struct netfront_queue *queue, struct sk_buff *skb,
+	struct page *page, unsigned int offset, unsigned int len)
 {
-	char *data = skb->data;
-	unsigned long mfn;
-	RING_IDX prod = queue->tx.req_prod_pvt;
-	int frags = skb_shinfo(skb)->nr_frags;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
 	unsigned int id;
+	struct xen_netif_tx_request *tx;
 	grant_ref_t ref;
-	int i;
 
-	/* While the header overlaps a page boundary (including being
-	   larger than a page), split it it into page-sized chunks. */
-	while (len > PAGE_SIZE - offset) {
-		tx->size = PAGE_SIZE - offset;
-		tx->flags |= XEN_NETTXF_more_data;
-		len -= tx->size;
-		data += tx->size;
-		offset = 0;
+	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 
-		id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-		queue->tx_skbs[id].skb = skb_get(skb);
-		tx = RING_GET_REQUEST(&queue->tx, prod++);
-		tx->id = id;
-		ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-		BUG_ON((signed short)ref < 0);
+	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+	BUG_ON((signed short)ref < 0);
 
-		mfn = virt_to_mfn(data);
-		gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-						mfn, GNTMAP_readonly);
+	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+					page_to_mfn(page), GNTMAP_readonly);
 
-		queue->grant_tx_page[id] = virt_to_page(data);
-		tx->gref = queue->grant_tx_ref[id] = ref;
-		tx->offset = offset;
-		tx->size = len;
-		tx->flags = 0;
-	}
+	queue->tx_skbs[id].skb = skb;
+	queue->grant_tx_page[id] = page;
+	queue->grant_tx_ref[id] = ref;
 
-	/* Grant backend access to each skb fragment page. */
-	for (i = 0; i < frags; i++) {
-		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
-		struct page *page = skb_frag_page(frag);
+	tx->id = id;
+	tx->gref = ref;
+	tx->offset = offset;
+	tx->size = len;
+	tx->flags = 0;
 
-		len = skb_frag_size(frag);
-		offset = frag->page_offset;
+	return tx;
+}
 
-		/* Skip unused frames from start of page */
-		page += offset >> PAGE_SHIFT;
-		offset &= ~PAGE_MASK;
+static struct xen_netif_tx_request *xennet_make_txreqs(
+	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
+	struct sk_buff *skb, struct page *page,
+	unsigned int offset, unsigned int len)
+{
+	/* Skip unused frames from start of page */
+	page += offset >> PAGE_SHIFT;
+	offset &= ~PAGE_MASK;
 
-		while (len > 0) {
-			unsigned long bytes;
-
-			bytes = PAGE_SIZE - offset;
-			if (bytes > len)
-				bytes = len;
-
-			tx->flags |= XEN_NETTXF_more_data;
-
-			id = get_id_from_freelist(&queue->tx_skb_freelist,
-						  queue->tx_skbs);
-			queue->tx_skbs[id].skb = skb_get(skb);
-			tx = RING_GET_REQUEST(&queue->tx, prod++);
-			tx->id = id;
-			ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-			BUG_ON((signed short)ref < 0);
-
-			mfn = pfn_to_mfn(page_to_pfn(page));
-			gnttab_grant_foreign_access_ref(ref,
-							queue->info->xbdev->otherend_id,
-							mfn, GNTMAP_readonly);
-
-			queue->grant_tx_page[id] = page;
-			tx->gref = queue->grant_tx_ref[id] = ref;
-			tx->offset = offset;
-			tx->size = bytes;
-			tx->flags = 0;
-
-			offset += bytes;
-			len -= bytes;
-
-			/* Next frame */
-			if (offset == PAGE_SIZE && len) {
-				BUG_ON(!PageCompound(page));
-				page++;
-				offset = 0;
-			}
-		}
+	while (len) {
+		tx->flags |= XEN_NETTXF_more_data;
+		tx = xennet_make_one_txreq(queue, skb_get(skb),
+					   page, offset, len);
+		page++;
+		offset = 0;
+		len -= tx->size;
 	}
 
-	queue->tx.req_prod_pvt = prod;
+	return tx;
 }
 
 /*
- * Count how many ring slots are required to send the frags of this
- * skb. Each frag might be a compound page.
+ * Count how many ring slots are required to send this skb. Each frag
+ * might be a compound page.
  */
-static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+static int xennet_count_skb_slots(struct sk_buff *skb)
 {
 	int i, frags = skb_shinfo(skb)->nr_frags;
-	int pages = 0;
+	int pages;
+
+	pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
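The subtle part of the new xennet_make_txreqs() is the compound-page handling: it first skips the sub-pages that the offset covers entirely, then emits one request per page crossing. The arithmetic is easy to check in isolation; below is a minimal user-space sketch of it (PAGE_* constants redefined locally for 4 KiB pages, and a plain index standing in for the struct page pointer; this is not driver code):

#include <stdio.h>

/* Local stand-ins for the kernel constants (4 KiB pages). */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* A 9000-byte frag starting 5000 bytes into a compound page;
	 * "page" is a plain index standing in for a struct page pointer. */
	unsigned long page = 0;
	unsigned long offset = 5000;
	unsigned long len = 9000;

	/* Skip the sub-pages the offset covers entirely. */
	page += offset >> PAGE_SHIFT;	/* skips 1 whole sub-page */
	offset &= ~PAGE_MASK;		/* 5000 % 4096 = 904 */

	/* One request per page crossing, as xennet_make_txreqs() does. */
	while (len) {
		unsigned long chunk = PAGE_SIZE - offset;
		if (chunk > len)
			chunk = len;
		printf("page %lu: offset %lu, size %lu\n", page, offset, chunk);
		page++;
		offset = 0;
		len -= chunk;
	}
	return 0;
}

This prints three chunks (3192, 4096, and 1712 bytes) that together cover the 9000-byte frag, one ring slot per page touched.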
@@ -559,18 +518,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	unsigned short id;
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
-	struct xen_netif_tx_request *tx;
-	char *data = skb->data;
-	RING_IDX i;
-	grant_ref_t ref;
-	unsigned long mfn;
+	struct xen_netif_tx_request *tx, *first_tx;
+	unsigned int i;
 	int notify;
 	int slots;
-	unsigned int offset = offset_in_page(data);
-	unsigned int len = skb_headlen(skb);
+	struct page *page;
+	unsigned int offset;
+	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
 	unsigned int num_queues = dev->real_num_tx_queues;
@@ -593,18 +549,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
-		xennet_count_skb_frag_slots(skb);
+	slots = xennet_count_skb_slots(skb);
 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 				    slots, skb->len);
 		if (skb_linearize(skb))
 			goto drop;
-		data = skb->data;
-		offset = offset_in_page(data);
-		len = skb_headlen(skb);
 	}
 
+	page = virt_to_page(skb->data);
+	offset = offset_in_page(skb->data);
+	len = skb_headlen(skb);
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 
 	if (unlikely(!netif_carrier_ok(dev) ||
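xennet_count_skb_slots() now folds the linear area into the estimate with PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)), replacing the DIV_ROUND_UP() that was open-coded in xennet_start_xmit(). The rounding matters because even a short header can straddle a page boundary and so cost two slots. A stand-alone sketch of the arithmetic (local copy of PFN_UP, values chosen for illustration):

#include <stdio.h>

/* Local copies of the kernel helpers, for a 4 KiB page. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	/* A 100-byte header that begins 4000 bytes into its page:
	 * bytes 4000..4099 straddle the 4096 boundary. */
	unsigned int offset = 4000;
	unsigned int headlen = 100;

	printf("slots for the linear area: %lu\n",
	       (unsigned long)PFN_UP(offset + headlen));	/* prints 2 */
	return 0;
}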
@@ -614,25 +570,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 	}
 
-	i = queue->tx.req_prod_pvt;
-
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
-	queue->tx_skbs[id].skb = skb;
-
-	tx = RING_GET_REQUEST(&queue->tx, i);
-
-	tx->id = id;
-	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
-	mfn = virt_to_mfn(data);
-	gnttab_grant_foreign_access_ref(
-		ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
-	queue->grant_tx_page[id] = virt_to_page(data);
-	tx->gref = queue->grant_tx_ref[id] = ref;
-	tx->offset = offset;
-	tx->size = len;
+	/* First request for the linear area. */
+	first_tx = tx = xennet_make_one_txreq(queue, skb,
+					      page, offset, len);
+	page++;
+	offset = 0;
+	len -= tx->size;
 
-	tx->flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -640,11 +584,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* remote but checksummed. */
 		tx->flags |= XEN_NETTXF_data_validated;
 
+	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
 		struct xen_netif_extra_info *gso;
 
 		gso = (struct xen_netif_extra_info *)
-			RING_GET_REQUEST(&queue->tx, ++i);
+			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
 		tx->flags |= XEN_NETTXF_extra_info;
 
@@ -659,10 +604,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso->flags = 0;
 	}
 
-	queue->tx.req_prod_pvt = i + 1;
+	/* Requests for the rest of the linear area. */
+	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 
-	xennet_make_frags(skb, queue, tx);
-	tx->size = skb->len;
+	/* Requests for all the frags. */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		tx = xennet_make_txreqs(queue, tx, skb,
+					skb_frag_page(frag), frag->page_offset,
+					skb_frag_size(frag));
+	}
+
+	/* First request has the packet length. */
+	first_tx->size = skb->len;
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
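Two invariants of the netif Tx protocol are preserved across the refactor: every request except the last in a packet carries XEN_NETTXF_more_data (xennet_make_txreqs() sets it on the previous request before emitting the next), and the first request's size field holds the total packet length, which is why first_tx->size is rewritten only after all chunks have been queued. A toy illustration with a mock request struct (MORE_DATA is a local stand-in for the real flag value, not the driver's type or constant):

#include <stdio.h>

/* Mock of a Tx ring request, for illustration only. */
struct mock_txreq {
	unsigned int size;
	unsigned int flags;
};

#define MORE_DATA 0x4	/* stand-in for XEN_NETTXF_more_data */

int main(void)
{
	/* A 6000-byte packet: 1500-byte linear area plus one 4500-byte
	 * frag, assuming neither chunk crosses a page boundary. */
	struct mock_txreq ring[2] = {
		{ .size = 1500, .flags = 0 },	/* first (linear area) */
		{ .size = 4500, .flags = 0 },	/* frag chunk */
	};

	/* Chain: flag the previous request before emitting the next. */
	ring[0].flags |= MORE_DATA;

	/* Fix-up done last: the first request reports the whole packet. */
	ring[0].size = 1500 + 4500;

	for (int i = 0; i < 2; i++)
		printf("req %d: size=%u flags=%#x\n",
		       i, ring[i].size, ring[i].flags);
	return 0;
}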
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,11 @@
 
 #include <asm/xen/page.h>
 
+static inline unsigned long page_to_mfn(struct page *page)
+{
+	return pfn_to_mfn(page_to_pfn(page));
+}
+
 struct xen_memory_region {
 	phys_addr_t start;
 	phys_addr_t size;