This is the 4.4.82 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlmPuZcACgkQONu9yGCS
 aT5o0BAAlT21EbhyxoMPC6xrPHAF1Oi8mTVfpu+618AUs3B1M6xge/EKI08B/8DP
 MZgaqSqY5ttaIlDKX5OVhY+HiuMg3SbIaFDzhS+OzjpuIjSA9ljNHazp5/l2HsQu
 9zyPFN02L2zqWYppyDo6FQBfStB5rUHB4eVMgD6zuNU/YQovtibGqAY4LBfWvxf/
 eDO6VfjiS4zzcCoplZxcxim1YVZ+HX09BuwniJzukM4C4/uMDubwMlJmrN9YsQZW
 x5zWnLHce2MATk9yF4BzMI/iRDR+Bm6Vx3m1Vzq9WDu7/kkMTVdYjXHmZn02YQub
 q4q1nDZyzBf8SvE4kf+fMYS8+dUrwiKf0lahBTK31J5Bc33lRfBfvv+dr/aEnp/Q
 FhraSkcBDrulnxuq77WZbvWzj0otF1pCTtURyCSfdc4SOFwVIz2NLQ2ZnnXk4gnN
 h5TqjxSDwr2CwTMzOnaKjBcuWnKPvn3/Pjm+/MJS8wvQYPZv8a4AzMIwxjDEN78Z
 +FvtaWEoUCnlP869hyR7gTfk2541+qjMdDRRUPSQ16PvepKy1AG9iCqVvZThScyQ
 PygaiBYZ9pbcyFuExLQrj2FDY2odinPfN8IsCQQbk5Es5mCdzJZOOkLeO2PO0MxD
 Dya79igFnpNj7ZEu6T7lD6Izg/6fYWu7qKmpDKQ7/xn4hHxj/Ig=
 =D9TG
 -----END PGP SIGNATURE-----

Merge 4.4.82 into android-4.4

Changes in 4.4.82
	tcp: avoid setting cwnd to invalid ssthresh after cwnd reduction states
	net: fix keepalive code vs TCP_FASTOPEN_CONNECT
	bpf, s390: fix jit branch offset related to ldimm64
	net: sched: set xt_tgchk_param par.nft_compat as 0 in ipt_init_target
	tcp: fastopen: tcp_connect() must refresh the route
	net: avoid skb_warn_bad_offload false positives on UFO
	packet: fix tp_reserve race in packet_set_ring
	revert "net: account for current skb length when deciding about UFO"
	revert "ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output"
	udp: consistently apply ufo or fragmentation
	sparc64: Prevent perf from running during super critical sections
	KVM: arm/arm64: Handle hva aging while destroying the vm
	mm/mempool: avoid KASAN marking mempool poison checks as use-after-free
	ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
	net: account for current skb length when deciding about UFO
	Linux 4.4.82

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2017-08-14 10:17:08 -07:00
commit 4b8fc9f2bc
18 changed files with 60 additions and 28 deletions

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -1629,12 +1629,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }

@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);

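For context on the s390 JIT fix above: BPF_LD | BPF_IMM | BPF_DW ("ldimm64") occupies two struct bpf_insn slots, and eBPF branch offsets are counted in raw slots, so the JIT's address table has to advance by insn_count rather than by 1 or any branch that crosses an ldimm64 lands on the wrong JITed address. A rough user-space sketch (illustration only, not taken from the patch; it only builds and counts the instructions, using the uapi definitions from <linux/bpf.h>):

/* Illustration: an eBPF program in which a branch jumps past a 64-bit
 * immediate load.  The jump offset (+3) counts the extra ldimm64 slot,
 * which is exactly what jit->addrs[i + insn_count] has to account for.
 */
#include <linux/bpf.h>
#include <stdio.h>

int main(void)
{
	struct bpf_insn prog[] = {
		/* slot 0: if r1 == 0 goto +3 (lands on slot 4) */
		{ .code = BPF_JMP | BPF_JEQ | BPF_K, .dst_reg = 1, .off = 3 },
		/* slots 1-2: r0 = 0x1122334455667788 (ldimm64, two slots) */
		{ .code = BPF_LD | BPF_IMM | BPF_DW, .dst_reg = 0,
		  .imm = 0x55667788 },
		{ .imm = 0x11223344 },			/* second half of ldimm64 */
		/* slot 3: exit */
		{ .code = BPF_JMP | BPF_EXIT },
		/* slot 4: r0 = 0; exit */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};

	printf("%zu logical instructions, %zu slots\n",
	       sizeof(prog) / sizeof(prog[0]) - 1,	/* ldimm64 counted once */
	       sizeof(prog) / sizeof(prog[0]));
	return 0;
}
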
@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[0]),
+			     ctx);
 }
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the

@@ -375,6 +375,7 @@ tsb_flush:
 	 * %o1: TSB base config pointer
 	 * %o2: TSB huge config pointer, or NULL if none
 	 * %o3: Hypervisor TSB descriptor physical address
+	 * %o4: Secondary context to load, if non-zero
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

@@ -35,6 +35,5 @@ void restore_processor_state(void)
 {
 	struct mm_struct *mm = current->active_mm;
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }

@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool)
 	void *element = pool->elements[--pool->curr_nr];
 	BUG_ON(pool->curr_nr < 0);
-	check_element(pool, element);
 	kasan_unpoison_element(pool, element);
+	check_element(pool, element);
 	return element;
 }

@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
 	if (tx_path)
 		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 	return skb->ip_summed == CHECKSUM_NONE;
 }

@@ -922,11 +922,12 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;
 	cork->length += length;
-	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -1242,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		return -EINVAL;
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)

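The __ip_append_data() change above mainly affects UDP datagrams that are built up over several sends: with MSG_MORE (or UDP_CORK) the socket's write queue can already hold one or more skbs when a later chunk pushes the datagram past the MTU, and the added skb_is_gso()/skb_queue_len() checks keep such a half-built datagram from switching onto the UFO path midway. A minimal user-space sketch of that pattern, assuming placeholder address 192.0.2.1:9999 (whether UFO is used at all also depends on the outgoing device advertising NETIF_F_UFO):

/* Sketch: one UDP datagram assembled from two sendto() calls.  The
 * first (small) chunk is queued with MSG_MORE; the second (large)
 * chunk pushes the datagram past the MTU.  With the fix, the choice
 * between UFO and ordinary IP fragmentation stays consistent for the
 * whole datagram instead of flipping part-way through.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	static char small[1024], big[60 * 1024];
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),		/* placeholder port */
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder host */
	memset(small, 'x', sizeof(small));
	memset(big, 'x', sizeof(big));

	/* Queued only: MSG_MORE keeps the datagram open. */
	sendto(fd, small, sizeof(small), MSG_MORE,
	       (struct sockaddr *)&dst, sizeof(dst));
	/* Completes a ~61KB datagram that now exceeds the MTU. */
	sendto(fd, big, sizeof(big), 0,
	       (struct sockaddr *)&dst, sizeof(dst));

	close(fd);
	return 0;
}

The same reasoning applies to the IPv6 counterpart in ip6_append_data() further below.
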
@@ -2504,8 +2504,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}

@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk)
 	struct sk_buff *buff;
 	int err;
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
 	tcp_connect_init(sk);
 	if (unlikely(tp->repair)) {

@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 	elapsed = keepalive_time_when(tp);

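Both TCP changes above concern sockets using TCP_FASTOPEN_CONNECT: connect() returns immediately without sending a SYN, the SYN is only built by the first write (which is why tcp_connect() must re-validate the route at that point), and the keepalive timer can fire while the socket is still in SYN_SENT. A rough user-space sketch of such a socket, assuming placeholder address 192.0.2.1:80; the fallback #define matches the uapi value and is only needed where older libc headers lack the option:

/* Sketch: deferred connect via TCP_FASTOPEN_CONNECT with keepalive
 * enabled.  connect() returns 0 without emitting a SYN; the first
 * send() runs tcp_connect(), which (after the fix above) refreshes
 * the route, and the keepalive timer now also bails out in SYN_SENT.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30		/* uapi value; absent from older headers */
#endif

int main(void)
{
	int one = 1, idle = 10;
	const char req[] = "GET / HTTP/1.0\r\n\r\n";
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(80),		/* placeholder port */
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder host */

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));

	/* Returns 0 immediately: the connect is deferred, no SYN yet. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	/* The first write builds the SYN (with data); this is where
	 * tcp_connect() runs and the route must be valid. */
	if (send(fd, req, sizeof(req) - 1, 0) < 0)
		perror("send");

	close(fd);
	return 0;
}
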
@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 	if (is_udplite)				 /*     UDP-Lite      */
 		csum = udplite_csum(skb);
-	else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;

@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		/* Fragment the skb. IP headers of the fragments are updated in
 		 * inet_gso_segment()

@@ -1357,11 +1357,12 @@ emsgsize:
 	 */
 	cork->length += length;
-	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : headersize)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);

@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		/* Check if there is enough headroom to insert fragment header. */
 		tnl_hlen = skb_tnl_header_len(skb);

@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{

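The af_packet change above takes the socket lock so PACKET_RESERVE can no longer race with ring setup; the user-space contract is unchanged: set the reserve before creating the ring, after which the option fails with EBUSY. A minimal sketch (requires CAP_NET_RAW; block and frame sizes are arbitrary illustration values):

/* Sketch: set PACKET_RESERVE before PACKET_RX_RING.  Once the ring
 * exists the kernel rejects further PACKET_RESERVE changes; the race
 * fixed above was a concurrent setsockopt() changing tp_reserve while
 * the ring geometry was being validated.
 */
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int reserve = 128;		/* arbitrary per-frame headroom */
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 128,		/* (block_size / frame_size) * block_nr */
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket (needs CAP_NET_RAW)");
		return 1;
	}

	/* Must happen before the ring exists; afterwards it returns EBUSY. */
	if (setsockopt(fd, SOL_PACKET, PACKET_RESERVE,
		       &reserve, sizeof(reserve)) < 0)
		perror("PACKET_RESERVE");

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0)
		perror("PACKET_RX_RING");

	close(fd);
	return 0;
}
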
@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
 		return PTR_ERR(target);
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
 	par.table     = table;
-	par.entryinfo = NULL;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;