[NET] gso: Add skb_is_gso
This patch adds the wrapper function skb_is_gso, which can be used instead of testing skb_shinfo(skb)->gso_size directly. This makes things a little nicer and allows us to change the primary key for indicating whether an skb is GSO (if we ever want to do that).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9c6c6795ed
commit 89114afd43

18 changed files with 27 additions and 23 deletions
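The whole commit is one mechanical substitution, so the per-file hunks below all look alike. As a minimal sketch of the before/after pattern (the call-site body here is illustrative, not taken from any one file in this diff):

        /* include/linux/skbuff.h -- the new helper, exactly as added below */
        static inline int skb_is_gso(const struct sk_buff *skb)
        {
                return skb_shinfo(skb)->gso_size;
        }

        /* before: callers open-code the test against the shared-info field */
        if (skb_shinfo(skb)->gso_size)
                count++;        /* e.g. reserve an extra TX descriptor (illustrative) */

        /* after: the wrapper hides gso_size as the "is this skb GSO?" key */
        if (skb_is_gso(skb))
                count++;

If the primary key for "this skb is GSO" ever changes, only skb_is_gso() needs updating, not every call site in the eighteen files below.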
drivers/net/bnx2.c
@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
                 skb = tx_buf->skb;
 #ifdef BCM_TSO
                 /* partial BD completions possible with TSO packets */
-                if (skb_shinfo(skb)->gso_size) {
+                if (skb_is_gso(skb)) {
                         u16 last_idx, last_ring_idx;
 
                         last_idx = sw_cons +
drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct cpl_tx_pkt *cpl;
 
 #ifdef NETIF_F_TSO
-        if (skb_shinfo(skb)->gso_size) {
+        if (skb_is_gso(skb)) {
                 int eth_type;
                 struct cpl_tx_pkt_lso *hdr;
 
drivers/net/e1000/e1000_main.c
@@ -2394,7 +2394,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
         int err;
 
-        if (skb_shinfo(skb)->gso_size) {
+        if (skb_is_gso(skb)) {
                 if (skb_header_cloned(skb)) {
                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                         if (err)
@@ -2519,7 +2519,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                  * tso gets written back prematurely before the data is fully
                  * DMA'd to the controller */
                 if (!skb->data_len && tx_ring->last_tx_tso &&
-                    !skb_shinfo(skb)->gso_size) {
+                    !skb_is_gso(skb)) {
                         tx_ring->last_tx_tso = 0;
                         size -= 4;
                 }
@@ -2806,8 +2806,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_TSO
         /* Controller Erratum workaround */
-        if (!skb->data_len && tx_ring->last_tx_tso &&
-            !skb_shinfo(skb)->gso_size)
+        if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
                 count++;
 #endif
 
drivers/net/forcedeth.c
@@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
         np->tx_skbuff[nr] = skb;
 
 #ifdef NETIF_F_TSO
-        if (skb_shinfo(skb)->gso_size)
+        if (skb_is_gso(skb))
                 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
         else
 #endif
drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
         uint16_t ipcse, tucse, mss;
         int err;
 
-        if(likely(skb_shinfo(skb)->gso_size)) {
+        if (likely(skb_is_gso(skb))) {
                 if (skb_header_cloned(skb)) {
                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                         if (err)
drivers/net/loopback.c
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 #ifdef LOOPBACK_TSO
-        if (skb_shinfo(skb)->gso_size) {
+        if (skb_is_gso(skb)) {
                 BUG_ON(skb->protocol != htons(ETH_P_IP));
                 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
 
drivers/net/myri10ge/myri10ge.c
@@ -2116,7 +2116,7 @@ abort_linearize:
                 }
                 idx = (idx + 1) & tx->mask;
         } while (idx != last_idx);
-        if (skb_shinfo(skb)->gso_size) {
+        if (skb_is_gso(skb)) {
                 printk(KERN_ERR
                        "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
                        mgp->dev->name);
drivers/net/sky2.c
@@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
         count = sizeof(dma_addr_t) / sizeof(u32);
         count += skb_shinfo(skb)->nr_frags * count;
 
-        if (skb_shinfo(skb)->gso_size)
+        if (skb_is_gso(skb))
                 ++count;
 
         if (skb->ip_summed == CHECKSUM_HW)
drivers/net/typhoon.c
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
          * If problems develop with TSO, check this first.
          */
         numDesc = skb_shinfo(skb)->nr_frags + 1;
-        if(skb_tso_size(skb))
+        if (skb_is_gso(skb))
                 numDesc++;
 
         /* When checking for free space in the ring, we need to also
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
                                 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
         }
 
-        if(skb_tso_size(skb)) {
+        if (skb_is_gso(skb)) {
                 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
                 first_txd->numDesc++;
 
drivers/s390/net/qeth_main.c
@@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
         queue = card->qdio.out_qs
                 [qeth_get_priority_queue(card, skb, ipv, cast_type)];
 
-        if (skb_shinfo(skb)->gso_size)
+        if (skb_is_gso(skb))
                 large_send = card->options.large_send;
 
         /*are we able to do TSO ? If so ,prepare and send it from here */
include/linux/netdevice.h
@@ -1001,7 +1001,7 @@ static inline int net_gso_ok(int features, int gso_type)
 
 static inline int skb_gso_ok(struct sk_buff *skb, int features)
 {
-        return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
+        return net_gso_ok(features, skb_is_gso(skb) ?
                           skb_shinfo(skb)->gso_type : 0);
 }
 
include/linux/skbuff.h
@@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 { }
 #endif
 
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+        return skb_shinfo(skb)->gso_size;
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
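Worth noting: skb_is_gso() returns the raw gso_size value as an int rather than a strict 0/1 boolean, so it is nonzero exactly when the skb is GSO. Every converted call site in this commit uses it only in boolean context, including the skb_gso_ok() ternary in the netdevice.h hunk above.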
net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
         /* drop mtu oversized packets except gso */
-        if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+        if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
                 kfree_skb(skb);
         else {
 #ifdef CONFIG_BRIDGE_NETFILTER
net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
         if (skb->protocol == htons(ETH_P_IP) &&
             skb->len > skb->dev->mtu &&
-            !skb_shinfo(skb)->gso_size)
+            !skb_is_gso(skb))
                 return ip_fragment(skb, br_dev_queue_push_xmit);
         else
                 return br_dev_queue_push_xmit(skb);
net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
                 return dst_output(skb);
         }
 #endif
-        if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+        if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
                 return ip_fragment(skb, ip_finish_output2);
         else
                 return ip_finish_output2(skb);
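The bridge, IPv4, and IPv6 output-path call sites in this commit share one rationale: a GSO skb legitimately exceeds the device MTU before segmentation, so the over-MTU checks must not hand it to ip_fragment()/ip6_fragment(); it will be segmented later, closer to the device.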
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
         while (size > 0) {
                 int i;
 
-                if (skb_shinfo(skb)->gso_size)
+                if (skb_is_gso(skb))
                         len = size;
                 else {
 
net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
         }
 #endif
 
-        if (!skb_shinfo(skb)->gso_size)
+        if (!skb_is_gso(skb))
                 return xfrm4_output_finish2(skb);
 
         skb->protocol = htons(ETH_P_IP);
net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
 
 int ip6_output(struct sk_buff *skb)
 {
-        if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
+        if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
                                 dst_allfrag(skb->dst))
                 return ip6_fragment(skb, ip6_output2);
         else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
         skb->priority = sk->sk_priority;
 
         mtu = dst_mtu(dst);
-        if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
+        if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
                 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
                 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
                                 dst_output);
net/ipv6/xfrm6_output.c
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
 {
         struct sk_buff *segs;
 
-        if (!skb_shinfo(skb)->gso_size)
+        if (!skb_is_gso(skb))
                 return xfrm6_output_finish2(skb);
 
         skb->protocol = htons(ETH_P_IP);