net: wrap sk->sk_backlog_rcv()
Wrap calling sk->sk_backlog_rcv() in a function. This will allow extending the generic sk_backlog_rcv() behaviour in one place instead of at every call site.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b339a47c37
commit
c57943a1c9
5 changed files with 10 additions and 5 deletions
|
@ -482,6 +482,11 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
|
||||||
skb->next = NULL;
|
skb->next = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
|
||||||
|
{
|
||||||
|
return sk->sk_backlog_rcv(sk, skb);
|
||||||
|
}
|
||||||
|
|
||||||
#define sk_wait_event(__sk, __timeo, __condition) \
|
#define sk_wait_event(__sk, __timeo, __condition) \
|
||||||
({ int __rc; \
|
({ int __rc; \
|
||||||
release_sock(__sk); \
|
release_sock(__sk); \
|
||||||
|
|
|
@ -896,7 +896,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
|
||||||
BUG_ON(sock_owned_by_user(sk));
|
BUG_ON(sock_owned_by_user(sk));
|
||||||
|
|
||||||
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
|
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
|
||||||
sk->sk_backlog_rcv(sk, skb1);
|
sk_backlog_rcv(sk, skb1);
|
||||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
|
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -327,7 +327,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
|
||||||
*/
|
*/
|
||||||
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
|
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
|
||||||
|
|
||||||
rc = sk->sk_backlog_rcv(sk, skb);
|
rc = sk_backlog_rcv(sk, skb);
|
||||||
|
|
||||||
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
|
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
|
||||||
} else
|
} else
|
||||||
|
@ -1374,7 +1374,7 @@ static void __release_sock(struct sock *sk)
|
||||||
struct sk_buff *next = skb->next;
|
struct sk_buff *next = skb->next;
|
||||||
|
|
||||||
skb->next = NULL;
|
skb->next = NULL;
|
||||||
sk->sk_backlog_rcv(sk, skb);
|
sk_backlog_rcv(sk, skb);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We are in process context here with softirqs
|
* We are in process context here with softirqs
|
||||||
|
|
|
@ -1161,7 +1161,7 @@ static void tcp_prequeue_process(struct sock *sk)
|
||||||
* necessary */
|
* necessary */
|
||||||
local_bh_disable();
|
local_bh_disable();
|
||||||
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
|
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
|
||||||
sk->sk_backlog_rcv(sk, skb);
|
sk_backlog_rcv(sk, skb);
|
||||||
local_bh_enable();
|
local_bh_enable();
|
||||||
|
|
||||||
/* Clear memory counter. */
|
/* Clear memory counter. */
|
||||||
|
|
|
@ -201,7 +201,7 @@ static void tcp_delack_timer(unsigned long data)
|
||||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
|
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
|
||||||
|
|
||||||
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
|
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
|
||||||
sk->sk_backlog_rcv(sk, skb);
|
sk_backlog_rcv(sk, skb);
|
||||||
|
|
||||||
tp->ucopy.memory = 0;
|
tp->ucopy.memory = 0;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue