tcp: add max_quickacks param to tcp_incr_quickack and tcp_enter_quickack_mode

[ Upstream commit 9a9c9b51e54618861420093ae6e9b50a961914c5 ]

We want to add finer control of the number of ACK packets sent after
ECN events.

This patch is not changing current behavior, it only enables following
change.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Eric Dumazet 2018-05-21 15:08:56 -07:00 committed by Greg Kroah-Hartman
parent e2f337e2bd
commit 2b30c04bc6
3 changed files with 16 additions and 14 deletions

include/net/tcp.h — View file

@@ -376,7 +376,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
-void tcp_enter_quickack_mode(struct sock *sk);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {

net/ipv4/tcp_dctcp.c — View file

@@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
 		 */
 		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
 			__tcp_send_ack(sk, ca->prior_rcv_nxt);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, 1);
 	}

 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
 		 */
 		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
 			__tcp_send_ack(sk, ca->prior_rcv_nxt);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, 1);
 	}

 	ca->prior_rcv_nxt = tp->rcv_nxt;

net/ipv4/tcp_input.c — View file

@@ -176,21 +176,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	}
 }

-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

 	if (quickacks == 0)
 		quickacks = 2;
+	quickacks = min(quickacks, max_quickacks);
 	if (quickacks > icsk->icsk_ack.quick)
-		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		icsk->icsk_ack.quick = quickacks;
 }

-void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	tcp_incr_quickack(sk);
+
+	tcp_incr_quickack(sk, max_quickacks);
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
@@ -235,7 +237,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 		break;
 	case INET_ECN_CE:
 		if (tcp_ca_needs_ecn((struct sock *)tp))
@@ -243,7 +245,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)

 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -651,7 +653,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(sk);
+		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
 		int m = now - icsk->icsk_ack.lrcvtime;
@@ -667,7 +669,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			/* Too long gap. Apparently sender failed to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(sk);
+			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 			sk_mem_reclaim(sk);
 		}
 	}
@@ -4136,7 +4138,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);

 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4638,7 +4640,7 @@ queue_and_out:
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

 out_of_window:
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
 		__kfree_skb(skb);
@@ -5674,7 +5676,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);