Merge android-4.4.182 (9c4ab57) into msm-4.4

commit a32f2cd759

* refs/heads/tmp-9c4ab57
  Linux 4.4.182
  tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
  tcp: add tcp_min_snd_mss sysctl
  tcp: tcp_fragment() should apply sane memory limits
  tcp: limit payload size of sacked skbs
  UPSTREAM: binder: check for overflow when alloc for security context
  BACKPORT: binder: fix race between munmap() and direct reclaim

Change-Id: I4cfb9eb282f54f083631fec4c8161eed42e8ab54
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>

15 changed files with 79 additions and 21 deletions
Documentation/networking/ip-sysctl.txt

@@ -220,6 +220,14 @@ tcp_base_mss - INTEGER
 	Path MTU discovery (MTU probing).  If MTU probing is enabled,
 	this is the initial MSS used by the connection.
 
+tcp_min_snd_mss - INTEGER
+	TCP SYN and SYNACK messages usually advertise an ADVMSS option,
+	as described in RFC 1122 and RFC 6691.
+	If this ADVMSS option is smaller than tcp_min_snd_mss,
+	it is silently capped to tcp_min_snd_mss.
+
+	Default : 48 (at least 8 bytes of payload per segment)
+
 tcp_congestion_control - STRING
 	Set the congestion control algorithm to be used for new
 	connections. The algorithm "reno" is always available, but
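A note on the semantics documented above: the floor behaves like a simple max(). The following is a minimal userspace sketch (illustrative values and a hypothetical helper name, not kernel code) of how an advertised MSS below the floor is raised:

	/* Hypothetical helper mirroring the documented capping rule. */
	#include <stdio.h>

	static int cap_snd_mss(int advmss, int min_snd_mss)
	{
		return advmss < min_snd_mss ? min_snd_mss : advmss;
	}

	int main(void)
	{
		printf("%d\n", cap_snd_mss(8, 48));   /* malicious peer: raised to 48 */
		printf("%d\n", cap_snd_mss(536, 48)); /* normal peer: unchanged, 536 */
		return 0;
	}

With the default floor of 48 and 40 bytes of TCP option space, every segment is guaranteed at least 8 bytes of payload, which is what blunts the tiny-MSS resource-exhaustion attack.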
Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 181
+SUBLEVEL = 182
 EXTRAVERSION =
 NAME = Blurry Fish Butt
drivers/android/binder.c

@@ -3130,6 +3130,7 @@ static void binder_transaction(struct binder_proc *proc,
 
 	if (target_node && target_node->txn_security_ctx) {
 		u32 secid;
+		size_t added_size;
 
 		security_task_getsecid(proc->tsk, &secid);
 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
@@ -3139,7 +3140,15 @@ static void binder_transaction(struct binder_proc *proc,
 			return_error_line = __LINE__;
 			goto err_get_secctx_failed;
 		}
-		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+		added_size = ALIGN(secctx_sz, sizeof(u64));
+		extra_buffers_size += added_size;
+		if (extra_buffers_size < added_size) {
+			/* integer overflow of extra_buffers_size */
+			return_error = BR_FAILED_REPLY;
+			return_error_param = EINVAL;
+			return_error_line = __LINE__;
+			goto err_bad_extra_size;
+		}
 	}
 
 	trace_binder_transaction(reply, t, target_node);
@@ -3441,6 +3450,7 @@ err_copy_data_failed:
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
+err_bad_extra_size:
 	if (secctx)
 		security_release_secctx(secctx, secctx_sz);
 err_get_secctx_failed:
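The overflow test added above is the standard unsigned wrap-around idiom: after an unsigned addition, the sum is smaller than one of the operands exactly when the addition wrapped modulo 2^N. A standalone C sketch of the same pattern (illustrative names, not the binder code):

	#include <stdio.h>
	#include <stdint.h>

	/* Add 'added' to 'total'; report whether the sum wrapped past SIZE_MAX. */
	static int add_overflows(size_t total, size_t added, size_t *out)
	{
		*out = total + added;
		return *out < added;	/* true iff the addition overflowed */
	}

	int main(void)
	{
		size_t sum;

		if (add_overflows(SIZE_MAX - 4, 16, &sum))
			printf("overflow detected, sum wrapped to %zu\n", sum);
		return 0;
	}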
drivers/android/binder_alloc.c

@@ -924,15 +924,14 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	/* Same as mmget_not_zero() in later kernel versions */
+	if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = alloc->vma;
-	if (vma) {
-		/* Same as mmget_not_zero() in later kernel versions */
-		if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_write_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -946,10 +945,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 			     PAGE_SIZE, NULL);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_write(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
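The key change above is ordering: the old code read and tested alloc->vma before taking mmap_sem, so a concurrent munmap() could invalidate the mapping between the check and the use. The fix pins the mm and takes the lock first, and only then looks at the vma. A toy pthreads illustration of the same check-then-lock hazard (not binder code; names are hypothetical):

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
	static int *mapping;		/* stands in for alloc->vma */

	static void use_mapping_racy(void)
	{
		int *m = mapping;	/* BAD: read outside the lock */
		if (m) {
			pthread_mutex_lock(&map_lock);
			*m = 1;		/* m may already point to freed memory */
			pthread_mutex_unlock(&map_lock);
		}
	}

	static void use_mapping_fixed(void)
	{
		pthread_mutex_lock(&map_lock);	/* lock first ... */
		if (mapping)			/* ... then test and use */
			*mapping = 1;
		pthread_mutex_unlock(&map_lock);
	}

	int main(void)
	{
		use_mapping_fixed();
		(void)use_mapping_racy;		/* racy variant shown for contrast */
		return 0;
	}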
include/linux/tcp.h

@@ -419,4 +419,7 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 	tp->saved_syn = NULL;
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+		  int shiftlen);
+
 #endif /* _LINUX_TCP_H */
include/net/netns/ipv4.h

@@ -88,6 +88,7 @@ struct netns_ipv4 {
 	int sysctl_tcp_fwmark_accept;
 	int sysctl_tcp_mtu_probing;
 	int sysctl_tcp_base_mss;
+	int sysctl_tcp_min_snd_mss;
 	int sysctl_tcp_probe_threshold;
 	u32 sysctl_tcp_probe_interval;
 
include/net/tcp.h

@@ -54,6 +54,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS		48
+#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
 
 /*
  * Never offer a window over 32767 without using window scaling. Some
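The arithmetic these two macros encode: 48 bytes of minimum MSS minus 40 bytes of TCP option space leaves at least 8 bytes of payload per segment, so a 16-bit segment count bounds an skb at 65535 * 8 = 524280 bytes. A compile-time sketch of the same relations (standalone C11, mirroring the BUILD_BUG_ON added in net/ipv4/tcp.c below):

	#include <assert.h>

	#define MAX_TCP_OPTION_SPACE 40
	#define TCP_MIN_SND_MSS      48
	#define TCP_MIN_GSO_SIZE     (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

	static_assert(TCP_MIN_SND_MSS > MAX_TCP_OPTION_SPACE,
		      "floor must leave room for payload");
	static_assert(TCP_MIN_GSO_SIZE == 8, "8 bytes of payload per segment");
	static_assert(65535 * TCP_MIN_GSO_SIZE == 524280, "max bytes per skb");

	int main(void) { return 0; }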
include/uapi/linux/snmp.h

@@ -281,6 +281,7 @@ enum
 	LINUX_MIB_TCPKEEPALIVE,			/* TCPKeepAlive */
 	LINUX_MIB_TCPMTUPFAIL,			/* TCPMTUPFail */
 	LINUX_MIB_TCPMTUPSUCCESS,		/* TCPMTUPSuccess */
+	LINUX_MIB_TCPWQUEUETOOBIG,		/* TCPWqueueTooBig */
 	__LINUX_MIB_MAX
 };
 
net/ipv4/proc.c

@@ -303,6 +303,7 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
 	SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
 	SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
+	SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
 	SNMP_MIB_SENTINEL
 };
 
net/ipv4/sysctl_net_ipv4.c

@@ -36,6 +36,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
 static int tcp_adv_win_scale_min = -31;
 static int tcp_adv_win_scale_max = 31;
+static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+static int tcp_min_snd_mss_max = 65535;
 static int ip_ttl_min = 1;
 static int ip_ttl_max = 255;
 static int tcp_syn_retries_min = 1;
@@ -993,6 +995,15 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "tcp_min_snd_mss",
+		.data		= &init_net.ipv4.sysctl_tcp_min_snd_mss,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &tcp_min_snd_mss_min,
+		.extra2		= &tcp_min_snd_mss_max,
+	},
 	{
 		.procname	= "tcp_probe_threshold",
 		.data		= &init_net.ipv4.sysctl_tcp_probe_threshold,
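Because the entry uses proc_dointvec_minmax with extra1/extra2 bounds, writes outside [48, 65535] are rejected with -EINVAL. A userspace sketch (illustrative, assuming the patched kernel) of driving the new knob through procfs:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_snd_mss", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fprintf(f, "%d\n", 536) < 0)	/* any value in [48, 65535] */
			perror("fprintf");
		if (fclose(f) != 0)			/* out-of-range EINVAL can surface here */
			perror("fclose");
		return 0;
	}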
net/ipv4/tcp.c

@@ -3207,6 +3207,7 @@ void __init tcp_init(void)
 	int max_rshare, max_wshare, cnt;
 	unsigned int i;
 
+	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
 	sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
 
 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
net/ipv4/tcp_input.c

@@ -1276,7 +1276,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	TCP_SKB_CB(skb)->seq += shifted;
 
 	tcp_skb_pcount_add(prev, pcount);
-	BUG_ON(tcp_skb_pcount(skb) < pcount);
+	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
 	tcp_skb_pcount_add(skb, -pcount);
 
 	/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1338,6 +1338,21 @@ static int skb_can_shift(const struct sk_buff *skb)
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+		  int pcount, int shiftlen)
+{
+	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+	 * to make sure not storing more than 65535 * 8 bytes per skb,
+	 * even if current MSS is bigger.
+	 */
+	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+		return 0;
+	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+		return 0;
+	return skb_shift(to, from, shiftlen);
+}
+
 /* Try collapsing SACK blocks spanning across multiple skbs to a single
  * skb.
  */
@@ -1349,6 +1364,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
 	int mss;
+	int next_pcount;
 	int pcount = 0;
 	int len;
 	int in_sack;
@@ -1443,7 +1459,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
 		goto fallback;
 
-	if (!skb_shift(prev, skb, len))
+	if (!tcp_skb_shift(prev, skb, pcount, len))
 		goto fallback;
 	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
 		goto out;
@@ -1462,11 +1478,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		goto out;
 
 	len = skb->len;
-	if (skb_shift(prev, skb, len)) {
-		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+	next_pcount = tcp_skb_pcount(skb);
+	if (tcp_skb_shift(prev, skb, next_pcount, len)) {
+		pcount += next_pcount;
+		tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
 	}
-
 out:
 	state->fack_count += pcount;
 	return prev;
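The two guards in tcp_skb_shift() above enforce the same 16-bit budget from both directions: an aggregated skb may hold at most 65535 segments, and with the 8-byte minimum gso_size at most 65535 * 8 bytes. A standalone sketch of the guard logic with illustrative numbers (hypothetical helper, not the kernel function):

	#include <stdio.h>

	#define TCP_MIN_GSO_SIZE 8

	static int shift_allowed(unsigned int to_len, unsigned int shiftlen,
				 unsigned int to_pcount, unsigned int pcount)
	{
		if (to_len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE)
			return 0;	/* byte budget of a 16-bit segment count */
		if (to_pcount + pcount > 65535)
			return 0;	/* segment count itself would overflow */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", shift_allowed(524272, 16, 10, 2));    /* 0: too many bytes */
		printf("%d\n", shift_allowed(1000, 1000, 65530, 10)); /* 0: too many segments */
		printf("%d\n", shift_allowed(1000, 1000, 10, 2));     /* 1: shift proceeds */
		return 0;
	}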
net/ipv4/tcp_ipv4.c

@@ -2435,6 +2435,7 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
 
 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
 
net/ipv4/tcp_output.c

@@ -1161,6 +1161,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	if (nsize < 0)
 		nsize = 0;
 
+	if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+		return -ENOMEM;
+	}
+
 	if (skb_unclone(skb, gfp))
 		return -ENOMEM;
 
@@ -1327,8 +1332,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	mss_now -= icsk->icsk_ext_hdr_len;
 
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	if (mss_now < 48)
-		mss_now = 48;
+	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
 	return mss_now;
 }
 
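The new tcp_fragment() check reads compactly because of the shift: (sk_wmem_queued >> 1) > sk_sndbuf is equivalent to sk_wmem_queued > 2 * sk_sndbuf, i.e. fragmentation is refused once the write queue holds more than twice the send-buffer budget, which stops SACK-driven fragmentation from growing the queue without bound. A small sketch of the predicate with illustrative numbers:

	#include <stdio.h>

	static int fragment_refused(unsigned int wmem_queued, unsigned int sndbuf)
	{
		return (wmem_queued >> 1) > sndbuf;	/* wmem_queued > 2 * sndbuf */
	}

	int main(void)
	{
		printf("%d\n", fragment_refused(4u * 1024 * 1024, 1024 * 1024)); /* 1: refused */
		printf("%d\n", fragment_refused(1024 * 1024, 1024 * 1024));      /* 0: allowed */
		return 0;
	}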
net/ipv4/tcp_timer.c

@@ -166,6 +166,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
 		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
 		mss = max(mss, 68 - tp->tcp_header_len);
+		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 	}
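This last hunk closes the remaining gap: even after a failed MTU probe halves the MSS, the result is clamped from below by the new sysctl, so probing can never drive the effective send MSS under tcp_min_snd_mss. A worked example of the clamp chain (illustrative numbers, not kernel values):

	#include <stdio.h>

	static int min_i(int a, int b) { return a < b ? a : b; }
	static int max_i(int a, int b) { return a > b ? a : b; }

	int main(void)
	{
		int mss = 70 >> 1;			/* halved probe MSS: 35 */
		int base_mss = 1024, header_len = 40, min_snd_mss = 48;

		mss = min_i(base_mss, mss);		/* still 35 */
		mss = max_i(mss, 68 - header_len);	/* max(35, 28) = 35 */
		mss = max_i(mss, min_snd_mss);		/* floored at 48 */
		printf("%d\n", mss);			/* prints 48 */
		return 0;
	}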