This is the 4.4.32 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABCAAGBQJYKq+PAAoJEDjbvchgkmk+W3sQAKHJ6dI10P/sFTe4AlGoRGNr
 ZtCwGwwolBoD/NtXa2HCovc9ofIU4zWYXl5P+kbHtKV/ZB4q5+m7Q5bpWh4TQFUy
 9TKho6aywF9uXpAEV99qKYvAOIq5EgJXdgrhCRTYBBR9+uR3+B1cUJhxpyD6htw4
 H7ABpmihWjij0o9YYAin7y/O+8jeqnuNLPUoCek1Emf0cn7G5keMg8Lli0WCz7jM
 JdKOjbvaYscgvb4BqTKqtg5NneC3GoeNp43Kvz4LbmcPw1yT5N8sHswqlSio4U2U
 Sxyvtj0RxoSoAus2UR62pTGDu1TrSHxWEWpYpqa77hr1/TpBY7put1OldFmUfu1B
 voQUI05Ox74RT9pl5c8DGnXH8Zyiu6a7Fpj6EdWbWxtbIgvWCLaDHniEY1WKR6cj
 Bmil/zjGyDtzANJBasC9NJHF8yd+/vxNfn5n0eAz6Xp94MIdOGPIQle+NATG5osN
 0b/NLit64B2F6Djijkv1vV9V7x1oYqIYVG6f1BoVtRXCjhcx9PnkskXcP+1SKUhH
 xOTXLt6rGNaTj+T2/41VJUtZ6eiZj+0GZMXILu5SIEdKiRiGLfsLHX117OK3ZhYT
 PFzzzWZoC2FOL/ldp/K6ncPZV0oHn3yfQa3T97jGI1LbsYkXXyQkW5PNwqGccbUc
 xvhEAPDvBxDlfcgqWMaw
 =DC+B
 -----END PGP SIGNATURE-----

Merge tag 'v4.4.32' into android-4.4.y

This is the 4.4.32 stable release

Change-Id: I5028402eadfcf055ac44a5e67abc6da75b2068b3
Dmitry Shmidt committed on 2016-11-15 17:02:38 -08:00
commit 324e88de4a
112 changed files with 637 additions and 337 deletions

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 30
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -617,6 +617,7 @@ include arch/$(SRCARCH)/Makefile
 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os

View file

@@ -17,7 +17,7 @@
 #define fd_outb(val,port) \
     do { \
-        if ((port) == FD_DOR) \
+        if ((port) == (u32)FD_DOR) \
             fd_setdor((val)); \
         else \
             outb((val),(port)); \

View file

@@ -31,7 +31,6 @@ struct thread_info {
     int cpu;                  /* cpu we're on */
     int preempt_count;        /* 0 => preemptable, <0 => BUG */
     mm_segment_t addr_limit;
-    struct restart_block restart_block;
 };
 /*
@@ -44,9 +43,6 @@ struct thread_info {
     .cpu = 0,                            \
     .preempt_count = INIT_PREEMPT_COUNT, \
     .addr_limit = KERNEL_DS,             \
-    .restart_block = {                   \
-        .fn = do_no_restart_syscall,     \
-    },                                   \
 }
 #define init_thread_info (init_thread_union.thread_info)

View file

@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
     unsigned int er0;
     /* Always make any pending restarted system calls return -EINTR */
-    current_thread_info()->restart_block.fn = do_no_restart_syscall;
+    current->restart_block.fn = do_no_restart_syscall;
     /* restore passed registers */
 #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)

View file

@@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
     struct mips_coproc *cop0 = vcpu->arch.cop0;
     enum emulation_result er = EMULATE_DONE;
-    if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+    if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+        kvm_clear_c0_guest_status(cop0, ST0_ERL);
+        vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+    } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
         kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
               kvm_read_c0_guest_epc(cop0));
         kvm_clear_c0_guest_status(cop0, ST0_EXL);
         vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-    } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-        kvm_clear_c0_guest_status(cop0, ST0_ERL);
-        vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
     } else {
         kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
             vcpu->arch.pc);
@@ -822,7 +822,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
     bool user;
     /* No need to flush for entries which are already invalid */
-    if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+    if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
         return;
     /* User address space doesn't need flushing for KSeg2/3 changes */
     user = tlb->tlb_hi < KVM_GUEST_KSEG0;

View file

@@ -106,8 +106,6 @@ linux_gateway_entry:
     mtsp    %r0,%sr4            /* get kernel space into sr4 */
     mtsp    %r0,%sr5            /* get kernel space into sr5 */
     mtsp    %r0,%sr6            /* get kernel space into sr6 */
-    mfsp    %sr7,%r1            /* save user sr7 */
-    mtsp    %r1,%sr3            /* and store it in sr3 */
 #ifdef CONFIG_64BIT
     /* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
     depdi   0, 31, 32, %r21
 1:
 #endif
+    /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+     * by external interrupts.
+     */
+    mfsp    %sr7,%r1            /* save user sr7 */
+    rsm     PSW_SM_I, %r0       /* disable interrupts */
+    mtsp    %r1,%sr3            /* and store it in sr3 */
     mfctl   %cr30,%r1
     xor     %r1,%r30,%r30       /* ye olde xor trick */
     xor     %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
     */
     mtsp    %r0,%sr7            /* get kernel space into sr7 */
+    ssm     PSW_SM_I, %r0       /* enable interrupts */
     STREGM  %r1,FRAME_SIZE(%r30)    /* save r1 (usp) here for now */
     mfctl   %cr30,%r1           /* get task ptr in %r1 */
     LDREG   TI_TASK(%r1),%r1

View file

@@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 #else
     BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-             offsetof(struct thread_fp_state, fpr[32][0]));
+             offsetof(struct thread_fp_state, fpr[32]));
     return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                    &target->thread.fp_state, 0, -1);
@@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
         return 0;
 #else
     BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-             offsetof(struct thread_fp_state, fpr[32][0]));
+             offsetof(struct thread_fp_state, fpr[32]));
     return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                   &target->thread.fp_state, 0, -1);

View file

@@ -339,7 +339,7 @@ do { \
 #define __get_user_asm_u64(x, ptr, retval, errret) \
     __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
 #define __get_user_asm_ex_u64(x, ptr) \
-    __get_user_asm_ex(x, ptr, "q", "", "=r")
+    __get_user_asm_ex(x, ptr, "q", "", "=&r")
 #endif
 #define __get_user_size(x, ptr, size, retval, errret) \
@@ -386,13 +386,13 @@ do { \
     __chk_user_ptr(ptr); \
     switch (size) { \
     case 1: \
-        __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+        __get_user_asm_ex(x, ptr, "b", "b", "=&q"); \
         break; \
     case 2: \
-        __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+        __get_user_asm_ex(x, ptr, "w", "w", "=&r"); \
         break; \
     case 4: \
-        __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+        __get_user_asm_ex(x, ptr, "l", "k", "=&r"); \
         break; \
     case 8: \
         __get_user_asm_ex_u64(x, ptr); \
@@ -406,7 +406,7 @@ do { \
     asm volatile("1: mov"itype" %1,%"rtype"0\n" \
              "2:\n" \
             _ASM_EXTABLE_EX(1b, 2b) \
-             : ltype(x) : "m" (__m(addr)))
+             : ltype(x) : "m" (__m(addr)), "0" (0))
 #define __put_user_nocheck(x, ptr, size) \
 ({ \
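
The two changes above work together: the new "0" (0) input pre-loads the output register with zero, so a load that faults (and is caught by the exception table) leaves a zeroed value rather than stale register contents, and the added & earlyclobber keeps that pre-loaded output from being allocated on top of the address input. A standalone sketch of what earlyclobber means (x86-64 GCC inline asm with a made-up function, not kernel code):

    #include <stdio.h>

    /* Sketch of an earlyclobber constraint: this asm writes %0 before
     * it has consumed %1, so without '&' the compiler could legally
     * give both operands the same register and the first mov would
     * destroy the input. */
    static unsigned long one_plus(unsigned long in)
    {
        unsigned long out;

        asm("movq $1, %0\n\t"   /* %0 is written early... */
            "addq %1, %0"       /* ...while %1 is still needed */
            : "=&r" (out)       /* earlyclobber: keep %0 != %1 */
            : "r" (in));
        return out;
    }

    int main(void)
    {
        printf("%lu\n", one_plus(41));  /* prints 42 */
        return 0;
    }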

View file

@@ -5033,7 +5033,7 @@ done_prefixes:
     /* Decode and fetch the destination operand: register or memory. */
     rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
-    if (ctxt->rip_relative)
+    if (ctxt->rip_relative && likely(ctxt->memopp))
         ctxt->memopp->addr.mem.ea = address_mask(ctxt,
                 ctxt->memopp->addr.mem.ea + ctxt->_eip);

View file

@@ -7252,10 +7252,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+    void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
     kvmclock_reset(vcpu);
-    free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
     kvm_x86_ops->vcpu_free(vcpu);
+    free_cpumask_var(wbinvd_dirty_mask);
 }
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,

View file

@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
     /* NOTE: The loop is more greedy than the cleanup_highmap variant.
      * We include the PMD passed in on _both_ boundaries. */
-    for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+    for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
             pmd++, vaddr += PMD_SIZE) {
         if (pmd_none(*pmd))
             continue;

View file

@@ -1042,7 +1042,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-                     uint32_t desc, bool need_strong_ref)
+                     u32 desc, bool need_strong_ref)
 {
     struct rb_node *n = proc->refs_by_desc.rb_node;
     struct binder_ref *ref;

View file

@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
     spin_lock_irq(&port->inbuf_lock);
     /* Remove unused data this port might have received. */
     discard_port_data(port);
+    spin_unlock_irq(&port->inbuf_lock);
     /* Remove buffers we queued up for the Host to send us data in. */
-    while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-        free_buf(buf, true);
-    spin_unlock_irq(&port->inbuf_lock);
+    do {
+        spin_lock_irq(&port->inbuf_lock);
+        buf = virtqueue_detach_unused_buf(port->in_vq);
+        spin_unlock_irq(&port->inbuf_lock);
+        if (buf)
+            free_buf(buf, true);
+    } while (buf);
     spin_lock_irq(&port->outvq_lock);
     reclaim_consumed_buffers(port);
+    spin_unlock_irq(&port->outvq_lock);
     /* Free pending buffers from the out-queue. */
-    while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
-        free_buf(buf, true);
-    spin_unlock_irq(&port->outvq_lock);
+    do {
+        spin_lock_irq(&port->outvq_lock);
+        buf = virtqueue_detach_unused_buf(port->out_vq);
+        spin_unlock_irq(&port->outvq_lock);
+        if (buf)
+            free_buf(buf, true);
+    } while (buf);
 }
 /*
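
The rework above exists because free_buf() may sleep when it releases DMA buffers, so it must not run under spin_lock_irq(); the new loops detach one buffer with the lock held, then free it with the lock dropped. A userspace sketch of that detach-then-free shape (hypothetical singly-linked queue, pthread mutexes standing in for spinlocks):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *detach_one(struct node **q)  /* caller holds lock */
    {
        struct node *n = *q;
        if (n)
            *q = n->next;
        return n;
    }

    static void drain(struct node **q, pthread_mutex_t *lock)
    {
        struct node *n;

        do {
            pthread_mutex_lock(lock);
            n = detach_one(q);
            pthread_mutex_unlock(lock);
            free(n);        /* free() never runs under the lock */
        } while (n);
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct node *q = NULL;  /* draining an empty queue is fine */

        drain(&q, &lock);
        return 0;
    }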

View file

@@ -73,13 +73,13 @@ struct rfc2734_header {
 #define fwnet_get_hdr_lf(h)         (((h)->w0 & 0xc0000000) >> 30)
 #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h)    (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h)    ((((h)->w0 & 0x0fff0000) >> 16) + 1)
 #define fwnet_get_hdr_fg_off(h)     (((h)->w0 & 0x00000fff))
 #define fwnet_get_hdr_dgl(h)        (((h)->w1 & 0xffff0000) >> 16)
 #define fwnet_set_hdr_lf(lf)        ((lf) << 30)
 #define fwnet_set_hdr_ether_type(et)    (et)
-#define fwnet_set_hdr_dg_size(dgs)  ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs)  (((dgs) - 1) << 16)
 #define fwnet_set_hdr_fg_off(fgo)   (fgo)
 #define fwnet_set_hdr_dgl(dgl)      ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
     int retval;
     u16 ether_type;
+    if (len <= RFC2374_UNFRAG_HDR_SIZE)
+        return 0;
     hdr.w0 = be32_to_cpu(buf[0]);
     lf = fwnet_get_hdr_lf(&hdr);
     if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
         return fwnet_finish_incoming_packet(net, skb, source_node_id,
                             is_broadcast, ether_type);
     }
     /* A datagram fragment has been received, now the fun begins. */
+    if (len <= RFC2374_FRAG_HDR_SIZE)
+        return 0;
     hdr.w1 = ntohl(buf[1]);
     buf += 2;
     len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
         fg_off = fwnet_get_hdr_fg_off(&hdr);
     }
     datagram_label = fwnet_get_hdr_dgl(&hdr);
-    dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+    dg_size = fwnet_get_hdr_dg_size(&hdr);
+    if (fg_off + len > dg_size)
+        return 0;
     spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
     fw_send_response(card, r, rcode);
 }
+static int gasp_source_id(__be32 *p)
+{
+    return be32_to_cpu(p[0]) >> 16;
+}
+static u32 gasp_specifier_id(__be32 *p)
+{
+    return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+           (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+static u32 gasp_version(__be32 *p)
+{
+    return be32_to_cpu(p[1]) & 0xffffff;
+}
 static void fwnet_receive_broadcast(struct fw_iso_context *context,
         u32 cycle, size_t header_length, void *header, void *data)
 {
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
     __be32 *buf_ptr;
     int retval;
     u32 length;
-    u16 source_node_id;
-    u32 specifier_id;
-    u32 ver;
     unsigned long offset;
     unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
     spin_unlock_irqrestore(&dev->lock, flags);
-    specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
-             | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
-    ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
-    source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-    if (specifier_id == IANA_SPECIFIER_ID &&
-        (ver == RFC2734_SW_VERSION
+    if (length > IEEE1394_GASP_HDR_SIZE &&
+        gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+        (gasp_version(buf_ptr) == RFC2734_SW_VERSION
 #if IS_ENABLED(CONFIG_IPV6)
-         || ver == RFC3146_SW_VERSION
+         || gasp_version(buf_ptr) == RFC3146_SW_VERSION
 #endif
-        )) {
-        buf_ptr += 2;
-        length -= IEEE1394_GASP_HDR_SIZE;
-        fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+        ))
+        fwnet_incoming_packet(dev, buf_ptr + 2,
+                length - IEEE1394_GASP_HDR_SIZE,
+                gasp_source_id(buf_ptr),
                 context->card->generation, true);
-    }
     packet.payload_length = dev->rcv_buffer_size;
     packet.interrupt = 1;
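
The first hunk also folds RFC 2734's off-by-one into the accessors: the 12-bit dg_size field carries the datagram size minus one, which is what the old call site's puzzled "/* ??? + 1 */" was compensating for. A standalone round-trip check of the fixed encoding (the macros here take and return a plain quadlet rather than the driver's header struct, so this is an adapted sketch, not the driver code):

    #include <assert.h>
    #include <stdint.h>

    /* RFC 2734 stores "datagram size - 1" in bits 16..27 of the first
     * quadlet: encode subtracts 1, decode adds it back. */
    #define fwnet_set_hdr_dg_size(dgs)  (((uint32_t)((dgs) - 1)) << 16)
    #define fwnet_get_hdr_dg_size(w0)   ((((w0) & 0x0fff0000) >> 16) + 1)

    int main(void)
    {
        uint32_t w0 = fwnet_set_hdr_dg_size(1500);

        assert(fwnet_get_hdr_dg_size(w0) == 1500);
        return 0;
    }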

View file

@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
     unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
     unsigned lane_num, i, max_pix_clock;
-    for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-        for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-            max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+    if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+        ENCODER_OBJECT_ID_NUTMEG) {
+        for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+            max_pix_clock = (lane_num * 270000 * 8) / bpp;
             if (max_pix_clock >= pix_clock) {
                 *dp_lanes = lane_num;
-                *dp_rate = link_rates[i];
+                *dp_rate = 270000;
                 return 0;
             }
         }
+    } else {
+        for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+            for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                if (max_pix_clock >= pix_clock) {
+                    *dp_lanes = lane_num;
+                    *dp_rate = link_rates[i];
+                    return 0;
+                }
+            }
+        }
     }
     return -EINVAL;

View file

@@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref)
         /* no need to clean up vcpi
          * as if we have no connector we never setup a vcpi */
         drm_dp_port_teardown_pdt(port, port->pdt);
+        port->pdt = DP_PEER_DEVICE_NONE;
     }
     kfree(port);
 }
@@ -1154,7 +1155,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
             drm_dp_put_port(port);
             goto out;
         }
-        if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+        if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+             port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+            port->port_num >= DP_MST_LOGICAL_PORT_0) {
             port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
             drm_mode_connector_set_tile_property(port->connector);
         }
@@ -2872,6 +2875,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
         mgr->cbs->destroy_connector(mgr, port->connector);
         drm_dp_port_teardown_pdt(port, port->pdt);
+        port->pdt = DP_PEER_DEVICE_NONE;
         if (!port->input && port->vcpi.vcpi > 0) {
             drm_dp_mst_reset_vcpi_slots(mgr, port);

View file

@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
     return 0;
 err:
-    list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+    list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
         if (subdrv->close)
             subdrv->close(dev, subdrv->dev, file);
     }

View file

@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
     unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
     unsigned lane_num, i, max_pix_clock;
-    for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-        for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-            max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+    if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+        ENCODER_OBJECT_ID_NUTMEG) {
+        for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+            max_pix_clock = (lane_num * 270000 * 8) / bpp;
             if (max_pix_clock >= pix_clock) {
                 *dp_lanes = lane_num;
-                *dp_rate = link_rates[i];
+                *dp_rate = 270000;
                 return 0;
             }
         }
+    } else {
+        for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+            for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                if (max_pix_clock >= pix_clock) {
+                    *dp_lanes = lane_num;
+                    *dp_rate = link_rates[i];
+                    return 0;
+                }
+            }
+        }
     }
     return -EINVAL;

View file

@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
                   int ring, u32 cp_int_cntl)
 {
-    u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-    WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+    WREG32(SRBM_GFX_CNTL, RINGID(ring));
     WREG32(CP_INT_CNTL, cp_int_cntl);
 }

View file

@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
     tmp &= AUX_HPD_SEL(0x7);
     tmp |= AUX_HPD_SEL(chan->rec.hpd);
-    tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+    tmp |= AUX_EN | AUX_LS_READ_EN;
     WREG32(AUX_CONTROL + aux_offset[instance], tmp);

View file

@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
     int i;
     struct si_dpm_quirk *p = si_dpm_quirk_list;
+    /* limit all SI kickers */
+    if (rdev->family == CHIP_PITCAIRN) {
+        if ((rdev->pdev->revision == 0x81) ||
+            (rdev->pdev->device == 0x6810) ||
+            (rdev->pdev->device == 0x6811) ||
+            (rdev->pdev->device == 0x6816) ||
+            (rdev->pdev->device == 0x6817) ||
+            (rdev->pdev->device == 0x6806))
+            max_mclk = 120000;
+    } else if (rdev->family == CHIP_VERDE) {
+        if ((rdev->pdev->revision == 0x81) ||
+            (rdev->pdev->revision == 0x83) ||
+            (rdev->pdev->revision == 0x87) ||
+            (rdev->pdev->device == 0x6820) ||
+            (rdev->pdev->device == 0x6821) ||
+            (rdev->pdev->device == 0x6822) ||
+            (rdev->pdev->device == 0x6823) ||
+            (rdev->pdev->device == 0x682A) ||
+            (rdev->pdev->device == 0x682B)) {
+            max_sclk = 75000;
+            max_mclk = 80000;
+        }
+    } else if (rdev->family == CHIP_OLAND) {
+        if ((rdev->pdev->revision == 0xC7) ||
+            (rdev->pdev->revision == 0x80) ||
+            (rdev->pdev->revision == 0x81) ||
+            (rdev->pdev->revision == 0x83) ||
+            (rdev->pdev->device == 0x6604) ||
+            (rdev->pdev->device == 0x6605)) {
+            max_sclk = 75000;
+            max_mclk = 80000;
+        }
+    } else if (rdev->family == CHIP_HAINAN) {
+        if ((rdev->pdev->revision == 0x81) ||
+            (rdev->pdev->revision == 0x83) ||
+            (rdev->pdev->revision == 0xC3) ||
+            (rdev->pdev->device == 0x6664) ||
+            (rdev->pdev->device == 0x6665) ||
+            (rdev->pdev->device == 0x6667)) {
+            max_sclk = 75000;
+            max_mclk = 80000;
+        }
+    }
     /* Apply dpm quirks */
     while (p && p->chip_device != 0) {
         if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
         }
         ++p;
     }
-    /* limit mclk on all R7 370 parts for stability */
-    if (rdev->pdev->device == 0x6811 &&
-        rdev->pdev->revision == 0x81)
-        max_mclk = 120000;
-    /* limit sclk/mclk on Jet parts for stability */
-    if (rdev->pdev->device == 0x6665 &&
-        rdev->pdev->revision == 0xc3) {
-        max_sclk = 75000;
-        max_mclk = 80000;
-    }
     if (rps->vce_active) {
         rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;

View file

@@ -168,6 +168,7 @@
 #define USB_DEVICE_ID_ATEN_4PORTKVM     0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC    0x2208
 #define USB_DEVICE_ID_ATEN_CS682        0x2213
+#define USB_DEVICE_ID_ATEN_CS692        0x8021
 #define USB_VENDOR_ID_ATMEL             0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH  0x211c

View file

@@ -61,6 +61,7 @@ static const struct hid_blacklist {
     { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+    { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },

View file

@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
     u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
     struct icmsg_negotiate *negop = NULL;
-    vmbus_recvpacket(channel, hbeat_txf_buf,
-             PAGE_SIZE, &recvlen, &requestid);
-    if (recvlen > 0) {
+    while (1) {
+        vmbus_recvpacket(channel, hbeat_txf_buf,
+                 PAGE_SIZE, &recvlen, &requestid);
+        if (!recvlen)
+            break;
         icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
                 sizeof(struct vmbuspipe_hdr)];

View file

@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
     struct mbox_chan *mbox_chan;
     struct mbox_client mbox_client;
     struct completion rd_complete;
-    u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+    u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
     u32 *resp_msg;
 };

View file

@@ -1876,6 +1876,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
     /* add the driver to the list of i2c drivers in the driver core */
     driver->driver.owner = owner;
     driver->driver.bus = &i2c_bus_type;
+    INIT_LIST_HEAD(&driver->clients);
     /* When registration returns, the driver core
      * will have called probe() for all matching-but-unbound devices.
@@ -1886,7 +1887,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
     pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
-    INIT_LIST_HEAD(&driver->clients);
     /* Walk the adapters that are already present */
     i2c_for_each_dev(driver, __process_new_driver);

View file

@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
             DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
         },
     },
+    {
+        /* Schenker XMG C504 - Elantech touchpad */
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+        },
+    },
     { }
 };

View file

@@ -1288,6 +1288,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
         dm_bio_restore(bd, bio);
         bio_record->details.bi_bdev = NULL;
+        bio->bi_error = 0;
         queue_bio(ms, bio, rw);
         return DM_ENDIO_INCOMPLETE;

View file

@@ -2260,8 +2260,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
     if (md->bs)
         bioset_free(md->bs);
-    cleanup_srcu_struct(&md->io_barrier);
     if (md->disk) {
         spin_lock(&_minor_lock);
         md->disk->private_data = NULL;
@@ -2273,6 +2271,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
     if (md->queue)
         blk_cleanup_queue(md->queue);
+    cleanup_srcu_struct(&md->io_barrier);
     if (md->bdev) {
         bdput(md->bdev);
         md->bdev = NULL;

View file

@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
         if (copy_from_user(sgl->lpage, user_addr + user_size -
                    sgl->lpage_size, sgl->lpage_size)) {
             rc = -EFAULT;
-            goto err_out1;
+            goto err_out2;
         }
     }
     return 0;
+ err_out2:
+    __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+                 sgl->lpage_dma_addr);
+    sgl->lpage = NULL;
+    sgl->lpage_dma_addr = 0;
  err_out1:
     __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                  sgl->fpage_dma_addr);
+    sgl->fpage = NULL;
+    sgl->fpage_dma_addr = 0;
  err_out:
     __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                  sgl->sgl_dma_addr);
+    sgl->sgl = NULL;
+    sgl->sgl_dma_addr = 0;
+    sgl->sgl_size = 0;
     return -ENOMEM;
 }
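
The new err_out2 label keeps the unwind strictly layered: each label frees exactly the allocations made before the failing step, and the freed pointers are NULLed so no later path can touch them again. The generic shape of that idiom, with hypothetical buffers:

    #include <stdlib.h>

    /* One label per allocation, in reverse order, so a failure at
     * step N unwinds exactly steps N-1..1. */
    struct ctx { void *a, *b, *c; };

    static int ctx_setup(struct ctx *ctx)
    {
        ctx->a = malloc(64);
        if (!ctx->a)
            goto err_out;
        ctx->b = malloc(64);
        if (!ctx->b)
            goto err_free_a;
        ctx->c = malloc(64);
        if (!ctx->c)
            goto err_free_b;
        return 0;

    err_free_b:
        free(ctx->b);
        ctx->b = NULL;
    err_free_a:
        free(ctx->a);
        ctx->a = NULL;
    err_out:
        return -1;
    }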

View file

@@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
     hisr = mei_txe_br_reg_read(hw, HISR_REG);
     aliveness = mei_txe_aliveness_get(dev);
-    if (hhisr & IPC_HHIER_SEC && aliveness)
+    if (hhisr & IPC_HHIER_SEC && aliveness) {
         ipc_isr = mei_txe_sec_reg_read_silent(hw,
                 SEC_IPC_HOST_INT_STATUS_REG);
-    else
+    } else {
         ipc_isr = 0;
+        hhisr &= ~IPC_HHIER_SEC;
+    }
     generated = generated ||
         (hisr & HISR_INT_STS_MSK) ||

View file

@@ -59,12 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
     host->pdata = pdev->dev.platform_data;
     regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-    /* Get registers' physical base address */
-    host->phy_regs = regs->start;
     host->regs = devm_ioremap_resource(&pdev->dev, regs);
     if (IS_ERR(host->regs))
         return PTR_ERR(host->regs);
+    /* Get registers' physical base address */
+    host->phy_regs = regs->start;
     platform_set_drvdata(pdev, host);
     return dw_mci_probe(host);
 }

View file

@@ -513,10 +513,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
             unsigned long long ec = be64_to_cpu(ech->ec);
             unmap_peb(ai, pnum);
             dbg_bld("Adding PEB to free: %i", pnum);
             if (err == UBI_IO_FF_BITFLIPS)
-                add_aeb(ai, free, pnum, ec, 1);
-            else
-                add_aeb(ai, free, pnum, ec, 0);
+                scrub = 1;
+            add_aeb(ai, free, pnum, ec, scrub);
             continue;
         } else if (err == 0 || err == UBI_IO_BITFLIPS) {
             dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -748,11 +749,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                       fmvhdr->vol_type,
                       be32_to_cpu(fmvhdr->last_eb_bytes));
-        if (!av)
-            goto fail_bad;
-        if (PTR_ERR(av) == -EINVAL) {
-            ubi_err(ubi, "volume (ID %i) already exists",
-                fmvhdr->vol_id);
+        if (IS_ERR(av)) {
+            if (PTR_ERR(av) == -EEXIST)
+                ubi_err(ubi, "volume (ID %i) already exists",
+                    fmvhdr->vol_id);
             goto fail_bad;
         }
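
The second hunk matters because the lookup returns an ERR_PTR()-encoded pointer: an error is a non-NULL pointer value, so "if (!av)" could never fire, and PTR_ERR() was being consulted even for valid pointers. A userspace model of the kernel's convention (the real helpers live in include/linux/err.h; this is a simplified sketch):

    #include <errno.h>
    #include <stdio.h>

    /* Errors are encoded as pointers in the top 4095 bytes of the
     * address space, so a failed call returns a non-NULL pointer
     * that plain NULL checks miss. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_volume(int id)
    {
        return ERR_PTR(-EEXIST);    /* always "already exists" here */
    }

    int main(void)
    {
        void *av = find_volume(1);

        if (!av)
            puts("unreachable: ERR_PTR() results are non-NULL");
        if (IS_ERR(av) && PTR_ERR(av) == -EEXIST)
            puts("volume already exists");
        return 0;
    }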

View file

@@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
     rtnl_lock();
+    /* We needn't recover from permanent error */
+    if (state == pci_channel_io_frozen)
+        tp->pcierr_recovery = true;
     /* We probably don't have netdev yet */
     if (!netdev || !netif_running(netdev))
         goto done;
-    /* We needn't recover from permanent error */
-    if (state == pci_channel_io_frozen)
-        tp->pcierr_recovery = true;
     tg3_phy_stop(tp);
     tg3_netif_stop(tp);
@@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
     rtnl_lock();
-    if (!netif_running(netdev))
+    if (!netdev || !netif_running(netdev))
         goto done;
     tg3_full_lock(tp, 0);

View file

@@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev)
      * enet-mac reset will reset mac address registers too,
      * so need to reconfigure it.
      */
-    if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-        memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-        writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-        writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-    }
+    memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+    writel((__force u32)cpu_to_be32(temp_mac[0]),
+           fep->hwp + FEC_ADDR_LOW);
+    writel((__force u32)cpu_to_be32(temp_mac[1]),
+           fep->hwp + FEC_ADDR_HIGH);
     /* Clear any outstanding interrupt. */
     writel(0xffffffff, fep->hwp + FEC_IEVENT);

View file

@@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev)
 #define smc_special_lock(lock, flags)       spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags)     spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock, flags)    (flags == flags)
+#define smc_special_trylock(lock, flags)    ((void)flags, true)
 #define smc_special_lock(lock, flags)       do { flags = 0; } while (0)
 #define smc_special_unlock(lock, flags)     do { flags = 0; } while (0)
 #endif

View file

@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
     skb_gro_pull(skb, gh_len);
     skb_gro_postpull_rcsum(skb, gh, gh_len);
-    pp = ptype->callbacks.gro_receive(head, skb);
+    pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 out_unlock:
     rcu_read_unlock();

View file

@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
         }
     }
-    pp = eth_gro_receive(head, skb);
+    pp = call_gro_receive(eth_gro_receive, head, skb);
 out:
     skb_gro_remcsum_cleanup(skb, &grc);

View file

@@ -127,8 +127,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
     }
     /* Need adjust the alignment to satisfy the CMA requirement */
-    if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-        align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+    if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) {
+        unsigned long order =
+            max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+        align = max(align, (phys_addr_t)PAGE_SIZE << order);
+    }
     prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
     if (prop) {
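
Besides bracing the branch, the hunk swaps the inner max() for max_t() so the int expression MAX_ORDER - 1 and pageblock_order can be compared without tripping the kernel's strict type matching in max(). The resulting alignment, worked through with assumed common defaults (4 KiB pages, MAX_ORDER = 11, pageblock_order = 10; actual values vary by architecture and config):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define MAX_ORDER       11
    #define PAGEBLOCK_ORDER 10

    int main(void)
    {
        unsigned long order = (MAX_ORDER - 1 > PAGEBLOCK_ORDER)
                    ? MAX_ORDER - 1 : PAGEBLOCK_ORDER;

        /* 4096 << 10 = 4194304: the CMA region gets at least
         * max-order/pageblock alignment. */
        printf("align = %lu bytes\n", PAGE_SIZE << order);
        return 0;
    }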

View file

@@ -321,6 +321,8 @@ int pwmchip_remove(struct pwm_chip *chip)
     unsigned int i;
     int ret = 0;
+    pwmchip_sysfs_unexport_children(chip);
     mutex_lock(&pwm_lock);
     for (i = 0; i < chip->npwm; i++) {
View file

@@ -350,6 +350,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
     }
 }
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+    struct device *parent;
+    unsigned int i;
+
+    parent = class_find_device(&pwm_class, NULL, chip,
+                   pwmchip_sysfs_match);
+    if (!parent)
+        return;
+
+    for (i = 0; i < chip->npwm; i++) {
+        struct pwm_device *pwm = &chip->pwms[i];
+
+        if (test_bit(PWMF_EXPORTED, &pwm->flags))
+            pwm_unexport_child(parent, pwm);
+    }
+}
+
 static int __init pwm_sysfs_init(void)
 {
     return class_register(&pwm_class);

View file

@@ -2545,18 +2545,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
     struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
     struct CommandControlBlock *ccb;
     int target = cmd->device->id;
-    int lun = cmd->device->lun;
-    uint8_t scsicmd = cmd->cmnd[0];
     cmd->scsi_done = done;
     cmd->host_scribble = NULL;
     cmd->result = 0;
-    if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
-        if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
-            cmd->result = (DID_NO_CONNECT << 16);
-        }
-        cmd->scsi_done(cmd);
-        return 0;
-    }
     if (target == 16) {
         /* virtual device for iop message transfer */
         arcmsr_handle_virtual_command(acb, cmd);

View file

@@ -1923,7 +1923,7 @@ struct megasas_instance_template {
 };
 #define MEGASAS_IS_LOGICAL(scp)                     \
-    (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+    ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 #define MEGASAS_DEV_INDEX(scp)                      \
     (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \

View file

@@ -1688,16 +1688,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
         goto out_done;
     }
-    switch (scmd->cmnd[0]) {
-    case SYNCHRONIZE_CACHE:
-        /*
-         * FW takes care of flush cache on its own
-         * No need to send it down
-         */
+    /*
+     * FW takes care of flush cache on its own for Virtual Disk.
+     * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+     */
+    if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
         scmd->result = DID_OK << 16;
         goto out_done;
-    default:
-        break;
     }
     if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
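
This new call site is also why the MEGASAS_IS_LOGICAL() hunk a few files up parenthesizes the macro body: ?: binds looser than &&, so the unparenthesized body would absorb the SYNCHRONIZE_CACHE test and complete every non-flush command as a no-op. A toy demonstration of the precedence trap (standard C operator precedence, toy values):

    #include <stdio.h>

    /* In "is_sync && ch < 8 ? 0 : 1" the && is swallowed by the ?:
     * condition, so the whole expression is 1 whenever is_sync is 0. */
    #define IS_LOGICAL_BAD(ch)  (ch) < 8 ? 0 : 1
    #define IS_LOGICAL_GOOD(ch) (((ch) < 8) ? 0 : 1)

    int main(void)
    {
        int is_sync = 0, channel = 9;   /* not a flush command */

        /* parses as (is_sync && channel < 8) ? 0 : 1  ->  1 (wrong) */
        printf("bad:  %d\n", is_sync && IS_LOGICAL_BAD(channel));
        /* is_sync && (((channel < 8)) ? 0 : 1)        ->  0        */
        printf("good: %d\n", is_sync && IS_LOGICAL_GOOD(channel));
        return 0;
    }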

View file

@@ -4981,6 +4981,7 @@ static void __exit scsi_debug_exit(void)
     bus_unregister(&pseudo_lld_bus);
     root_device_unregister(pseudo_primary);
+    vfree(map_storep);
     vfree(dif_storep);
     vfree(fake_storep);
 }

View file

@@ -872,10 +872,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
     if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
         return 0;
+    if (new_screen_size > (4 << 20))
+        return -EINVAL;
     newscreen = kmalloc(new_screen_size, GFP_USER);
     if (!newscreen)
         return -ENOMEM;
+    if (vc == sel_cons)
+        clear_selection();
     old_rows = vc->vc_rows;
     old_row_size = vc->vc_size_row;
@@ -1173,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
         break;
     case 3: /* erase scroll-back buffer (and whole display) */
         scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
-                vc->vc_screenbuf_size >> 1);
+                vc->vc_screenbuf_size);
         set_origin(vc);
         if (CON_IS_VISIBLE(vc))
             update_screen(vc);

View file

@@ -2845,7 +2845,7 @@ err3:
     kfree(dwc->setup_buf);
 err2:
-    dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+    dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
             dwc->ep0_trb, dwc->ep0_trb_addr);
 err1:
@@ -2869,7 +2869,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
     kfree(dwc->setup_buf);
-    dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+    dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
             dwc->ep0_trb, dwc->ep0_trb_addr);
     dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),

View file

@@ -741,19 +741,13 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
     req->length = length;
-    /* throttle highspeed IRQ rate back slightly */
-    if (gadget_is_dualspeed(dev->gadget) &&
-             (dev->gadget->speed == USB_SPEED_HIGH)) {
-        dev->tx_qlen++;
-        if (dev->tx_qlen == (dev->qmult/2)) {
-            req->no_interrupt = 0;
-            dev->tx_qlen = 0;
-        } else {
-            req->no_interrupt = 1;
-        }
-    } else {
-        req->no_interrupt = 0;
-    }
+    /* throttle high/super speed IRQ rate back slightly */
+    if (gadget_is_dualspeed(dev->gadget))
+        req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+                       dev->gadget->speed == USB_SPEED_SUPER)) &&
+                    !list_empty(&dev->tx_reqs))
+            ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+            : 0;
     retval = usb_ep_queue(in, req, GFP_ATOMIC);
     switch (retval) {

View file

@@ -72,7 +72,7 @@
 static const char hcd_name [] = "ohci_hcd";
 #define STATECHANGE_DELAY   msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY   msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY   msecs_to_jiffies(275)
 #include "ohci.h"
 #include "pci-quirks.h"

View file

@@ -1157,7 +1157,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                 xhci_set_link_state(xhci, port_array, wIndex,
                             XDEV_RESUME);
                 spin_unlock_irqrestore(&xhci->lock, flags);
-                msleep(20);
+                msleep(USB_RESUME_TIMEOUT);
                 spin_lock_irqsave(&xhci->lock, flags);
                 xhci_set_link_state(xhci, port_array, wIndex,
                             XDEV_U0);
@@ -1401,7 +1401,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
         if (need_usb2_u3_exit) {
             spin_unlock_irqrestore(&xhci->lock, flags);
-            msleep(20);
+            msleep(USB_RESUME_TIMEOUT);
             spin_lock_irqsave(&xhci->lock, flags);
         }

View file

@@ -45,6 +45,7 @@
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI          0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI       0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI    0x9cb1
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI         0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI     0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI    0x9d2f
@@ -154,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
         xhci->quirks |= XHCI_SPURIOUS_REBOOT;
     }
     if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-        pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+        (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+         pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
         xhci->quirks |= XHCI_SPURIOUS_REBOOT;
         xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
     }

View file

@@ -845,7 +845,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
     unsigned int control;
     int result;
-    cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+    result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+    if (result)
+        return result;
     result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
         |((control & CONTROL_RTS) ? TIOCM_RTS : 0)

View file

@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
     /* ekey Devices */
     { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
     /* Infineon Devices */
-    { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+    { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+    { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
     /* GE Healthcare devices */
     { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
     /* Active Research (Actisense) devices */

View file

@@ -626,8 +626,9 @@
 /*
  * Infineon Technologies
  */
 #define INFINEON_VID                    0x058b
-#define INFINEON_TRIBOARD_PID           0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC1798_PID    0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID     0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
 /*
  * Acton Research Corp.

View file

@@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface,
     serial->disconnected = 0;
-    usb_serial_console_init(serial->port[0]->minor);
+    if (num_ports > 0)
+        usb_serial_console_init(serial->port[0]->minor);
 exit:
     module_put(type->driver.owner);
     return 0;

View file

@@ -548,7 +548,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
     if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
         vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-        vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+        if (!vq->event)
+            vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
     }
 }
@@ -580,7 +581,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
      * entry. Always do both to keep code simple. */
     if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
         vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-        vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+        if (!vq->event)
+            vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
     }
     vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
     END_USE(vq);
@@ -648,10 +650,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
      * more to do. */
     /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
      * either clear the flags bit or point the event index at the next
-     * entry. Always do both to keep code simple. */
+     * entry. Always update the event index to keep code simple. */
     if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
         vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-        vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+        if (!vq->event)
+            vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
     }
     /* TODO: tune this threshold */
     bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -770,7 +773,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
     /* No callback? Tell other side not to bother us. */
     if (!callback) {
         vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-        vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+        if (!vq->event)
+            vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
     }
     /* Put everything in free lists. */
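
All four hunks gate the avail->flags write on !vq->event: once VIRTIO_RING_F_EVENT_IDX is negotiated, the spec says the device ignores the flags word, and interrupt suppression is driven by the used_event index instead, so writing the flags is non-compliant noise. The wrap-safe test event-index mode relies on (this mirrors vring_need_event() from the virtio spec; the example values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* "Has the ring index passed the event index?" with all
     * arithmetic intentionally mod 2^16 so wraparound is safe. */
    static int need_event(uint16_t event_idx, uint16_t new_idx,
                  uint16_t old_idx)
    {
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old_idx);
    }

    int main(void)
    {
        /* event_idx = 5; the index moved 4 -> 6: notify */
        printf("%d\n", need_event(5, 6, 4));    /* 1 */
        /* index moved 6 -> 7 with event_idx still 5: stay quiet */
        printf("%d\n", need_event(5, 7, 6));    /* 0 */
        return 0;
    }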

View file

@@ -2696,14 +2696,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
                          int index, int error)
 {
     struct btrfs_log_ctx *ctx;
+    struct btrfs_log_ctx *safe;
-    if (!error) {
-        INIT_LIST_HEAD(&root->log_ctxs[index]);
-        return;
-    }
-    list_for_each_entry(ctx, &root->log_ctxs[index], list)
+    list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+        list_del_init(&ctx->list);
         ctx->log_ret = error;
+    }
     INIT_LIST_HEAD(&root->log_ctxs[index]);
 }
@@ -2944,13 +2942,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
     mutex_unlock(&root->log_mutex);
 out_wake_log_root:
-    /*
-     * We needn't get log_mutex here because we are sure all
-     * the other tasks are blocked.
-     */
+    mutex_lock(&log_root_tree->log_mutex);
     btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
-    mutex_lock(&log_root_tree->log_mutex);
     log_root_tree->log_transid_committed++;
     atomic_set(&log_root_tree->log_commit[index2], 0);
     mutex_unlock(&log_root_tree->log_mutex);
@@ -2961,10 +2955,8 @@ out_wake_log_root:
     if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
         wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
-    /* See above. */
-    btrfs_remove_all_log_ctxs(root, index1, ret);
     mutex_lock(&root->log_mutex);
+    btrfs_remove_all_log_ctxs(root, index1, ret);
     root->log_transid_committed++;
     atomic_set(&root->log_commit[index1], 0);
     mutex_unlock(&root->log_mutex);

View file

@@ -139,6 +139,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
         len -= bytes;
     }
+    if (!error)
+        error = vfs_fsync(new_file, 0);
     fput(new_file);
 out_fput:
     fput(old_file);

View file

@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
  */
 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-    int err;
+    int err = 0;
     struct qstr nm;
     union ubifs_key key;
     struct ubifs_dent_node *dent;
@@ -452,14 +452,20 @@ out:
     kfree(file->private_data);
     file->private_data = NULL;
-    if (err != -ENOENT) {
+    if (err != -ENOENT)
         ubifs_err(c, "cannot find next direntry, error %d", err);
-        return err;
-    }
+    else
+        /*
+         * -ENOENT is a non-fatal error in this context, the TNC uses
+         * it to indicate that the cursor moved past the current directory
+         * and readdir() has to stop.
+         */
+        err = 0;
     /* 2 is a special value indicating that there are no more direntries */
     ctx->pos = 2;
-    return 0;
+    return err;
 }
/* Free saved readdir() state when the directory is closed */ /* Free saved readdir() state when the directory is closed */

View file

@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
     if (mp->m_quotainfo)
         ndquots = mp->m_quotainfo->qi_dqperchunk;
     else
-        ndquots = xfs_calc_dquots_per_chunk(
-                    XFS_BB_TO_FSB(mp, bp->b_length));
+        ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
     for (i = 0; i < ndquots; i++, d++) {
         if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),

View file

@@ -103,5 +103,5 @@ struct mfc_cache {
 struct rtmsg;
 extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
               __be32 saddr, __be32 daddr,
-              struct rtmsg *rtm, int nowait);
+              struct rtmsg *rtm, int nowait, u32 portid);
 #endif

View file

@@ -115,7 +115,7 @@ struct mfc6_cache {
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-               struct rtmsg *rtm, int nowait);
+               struct rtmsg *rtm, int nowait, u32 portid);
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);

View file

@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;

-	/* 7 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 3 bit hole */

 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {

 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here */
@@ -2059,6 +2081,22 @@ struct udp_offload {
 	struct udp_offload_callbacks callbacks;
 };

+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+					      struct sk_buff *,
+					      struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+						    struct sk_buff **head,
+						    struct sk_buff *skb,
+						    struct udp_offload *uoff)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64     rx_packets;
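The two call_gro_receive helpers above are what the per-protocol hunks later in this diff (VLAN, Ethernet, IPv4/IPv6, FOU/GUE, GRE, UDP) switch to. As a standalone toy model of the guard they implement, with hypothetical names standing in for the skb and a nested gro_receive callback:

/* Toy model of the recursion guard, not kernel code. */
#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct pkt {
	int recursion_counter;
	int flush;
};

static int gro_recursion_inc_test(struct pkt *p)
{
	return ++p->recursion_counter == GRO_RECURSION_LIMIT;
}

/* Stand-in for call_gro_receive(): guard first, then descend a layer. */
static void receive_layer(struct pkt *p, int layers_left)
{
	if (gro_recursion_inc_test(p)) {
		p->flush = 1;	/* stop merging, deliver the packet as-is */
		return;
	}
	if (layers_left > 0)
		receive_layer(p, layers_left - 1);
}

int main(void)
{
	struct pkt p = { 0, 0 };

	receive_layer(&p, 100);	/* absurdly deep encapsulation */
	printf("stopped at depth %d, flush=%d\n", p.recursion_counter, p.flush);
	return 0;
}

Without the counter, a packet crafted as many nested tunnels would drive the real gro_receive chain arbitrarily deep down the kernel stack; with it, parsing gives up after 15 layers and the packet is simply flushed.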

@@ -331,6 +331,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
 #ifdef CONFIG_PWM_SYSFS
 void pwmchip_sysfs_export(struct pwm_chip *chip);
 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
 #else
 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 {
@@ -339,6 +340,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
 }
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
 #endif /* CONFIG_PWM_SYSFS */

 #endif /* __LINUX_PWM_H */

@@ -554,7 +554,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
  */

 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct net *net, struct msghdr *msg,
 		 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -576,7 +576,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,

 static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
-	ip_cmsg_recv_offset(msg, skb, 0);
+	ip_cmsg_recv_offset(msg, skb, 0, 0);
 }

 bool icmp_global_allow(void);

@@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 int skb_do_redirect(struct sk_buff *);

+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+	return false;
+#endif
+}
+
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {

@@ -1426,6 +1426,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 	if (!sk_has_account(sk))
 		return;
 	sk->sk_forward_alloc += size;
+
+	/* Avoid a possible overflow.
+	 * TCP send queues can make this happen, if sk_mem_reclaim()
+	 * is not called and more than 2 GBytes are released at once.
+	 *
+	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
+	 * no need to hold that much forward allocation anyway.
+	 */
+	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+		__sk_mem_reclaim(sk, 1 << 20);
 }

 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
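For scale: sk_forward_alloc is a plain int, 1 << 21 is 2 MiB and 1 << 20 is 1 MiB, so the counter is now reclaimed long before the 2 GiB (2^31 byte) point at which it would wrap negative. A standalone toy model of the wrap this prevents (not kernel code; signed overflow is formally undefined in C, this is only what typically happens):

/* Toy model of the sk_forward_alloc wrap, not kernel code. */
#include <stdio.h>

int main(void)
{
	int forward_alloc = 0;		/* sk->sk_forward_alloc is an int */
	int chunk = 1 << 20;		/* uncharge 1 MiB at a time */
	int i;

	/* release 2 GiB with no reclaim in between: 2048 * 1 MiB = 2^31 */
	for (i = 0; i < 2048; i++)
		forward_alloc += chunk;

	/* on typical compilers the counter has wrapped negative here */
	printf("forward_alloc = %d\n", forward_alloc);
	return 0;
}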

@@ -344,7 +344,7 @@ struct rtnexthop {
 #define RTNH_F_OFFLOAD		8	/* offloaded route */
 #define RTNH_F_LINKDOWN		16	/* carrier-down on nexthop */

-#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)

 /* Macros to handle hexthops */

@@ -236,6 +236,9 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css,
  */
 static bool cgroup_ssid_enabled(int ssid)
 {
+	if (CGROUP_SUBSYS_COUNT == 0)
+		return false;
+
 	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
 }

@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 		return -EINVAL;

 	/* ensure minimal alignment required by mm core */
-	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+	alignment = PAGE_SIZE <<
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

 	/* alignment should be aligned with order_per_bit */
 	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	 * migratetype page by page allocator's buddy algorithm. In the case,
 	 * you couldn't get a contiguous memory, which is not what we want.
 	 */
-	alignment = max(alignment,
-		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);

@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 	err = memcg_init_list_lru(lru, memcg_aware);
 	if (err) {
 		kfree(lru->node);
+		/* Do this so a list_lru_destroy() doesn't crash: */
+		lru->node = NULL;
 		goto out;
 	}

@@ -2055,6 +2055,15 @@ retry:
 		     current->flags & PF_EXITING))
 		goto force;

+	/*
+	 * Prevent unbounded recursion when reclaim operations need to
+	 * allocate memory. This might exceed the limits temporarily,
+	 * but we prefer facilitating memory reclaim and getting back
+	 * under the limit over triggering OOM kills in these cases.
+	 */
+	if (unlikely(current->flags & PF_MEMALLOC))
+		goto force;
+
 	if (unlikely(task_in_memcg_oom(current)))
 		goto nomem;

@@ -2910,7 +2910,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 					    sc.may_writepage,
 					    sc.gfp_mask);

+	current->flags |= PF_MEMALLOC;
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	current->flags &= ~PF_MEMALLOC;

 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
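Taken together, the memcontrol.c and vmscan.c hunks close a recursion loop: reclaim marks the task with PF_MEMALLOC, and any charge attempted while that flag is set is forced through rather than re-entering reclaim. A standalone toy model of the hand-off (hypothetical names, not kernel code):

/* Toy model of the PF_MEMALLOC hand-off, not kernel code. */
#include <stdio.h>

#define PF_MEMALLOC 0x00000800	/* same bit the kernel uses */

static unsigned int current_flags;	/* stand-in for current->flags */

static void reclaim(void);

/* Stand-in for try_charge(): never re-enter reclaim from reclaim. */
static void charge(void)
{
	if (current_flags & PF_MEMALLOC) {
		puts("charge: called from reclaim, forcing the charge");
		return;
	}
	puts("charge: over limit, entering reclaim");
	reclaim();
}

/* Stand-in for try_to_free_mem_cgroup_pages(): flag, work, unflag. */
static void reclaim(void)
{
	current_flags |= PF_MEMALLOC;
	charge();	/* an allocation made while reclaiming */
	current_flags &= ~PF_MEMALLOC;
}

int main(void)
{
	charge();
	return 0;
}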

@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,

 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -951,13 +951,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
 		mod_timer(&query->timer, jiffies);
 }

-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
 	struct net_bridge *br = port->br;

-	spin_lock(&br->multicast_lock);
 	if (br->multicast_disabled || !netif_running(br->dev))
-		goto out;
+		return;

 	br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -965,8 +964,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 #endif
 	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
 		br_multicast_add_router(br, port);
+}

-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	__br_multicast_enable_port(port);
 	spin_unlock(&br->multicast_lock);
 }

@@ -1905,8 +1910,9 @@ static void br_multicast_start_querier(struct net_bridge *br,

 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_port *port;
+	int err = 0;

 	spin_lock_bh(&br->multicast_lock);
 	if (br->multicast_disabled == !val)
@@ -1934,10 +1940,9 @@ rollback:
 			goto rollback;
 	}

-	br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+	br_multicast_open(br);
+	list_for_each_entry(port, &br->port_list, list)
+		__br_multicast_enable_port(port);

 unlock:
 	spin_unlock_bh(&br->multicast_lock);

@@ -2836,6 +2836,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 	}
 	return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);

 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -4240,6 +4241,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

 		/* Setup for GRO checksum validation */
@@ -5204,6 +5206,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,

 static int __netdev_adjacent_dev_insert(struct net_device *dev,
 					struct net_device *adj_dev,
+					u16 ref_nr,
 					struct list_head *dev_list,
 					void *private, bool master)
 {
@@ -5213,7 +5216,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj = __netdev_find_adj(adj_dev, dev_list);

 	if (adj) {
-		adj->ref_nr++;
+		adj->ref_nr += ref_nr;
 		return 0;
 	}
@@ -5223,7 +5226,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj->dev = adj_dev;
 	adj->master = master;
-	adj->ref_nr = 1;
+	adj->ref_nr = ref_nr;
 	adj->private = private;
 	dev_hold(adj_dev);
@@ -5262,6 +5265,7 @@ free_adj:

 static void __netdev_adjacent_dev_remove(struct net_device *dev,
 					 struct net_device *adj_dev,
+					 u16 ref_nr,
 					 struct list_head *dev_list)
 {
 	struct netdev_adjacent *adj;
@@ -5274,10 +5278,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 		BUG();
 	}

-	if (adj->ref_nr > 1) {
-		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
-			 adj->ref_nr-1);
-		adj->ref_nr--;
+	if (adj->ref_nr > ref_nr) {
+		pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
+			 ref_nr, adj->ref_nr-ref_nr);
+		adj->ref_nr -= ref_nr;
 		return;
 	}
@@ -5296,21 +5300,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,

 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 					    struct net_device *upper_dev,
+					    u16 ref_nr,
 					    struct list_head *up_list,
 					    struct list_head *down_list,
 					    void *private, bool master)
 {
 	int ret;

-	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
-					   master);
+	ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+					   private, master);
 	if (ret)
 		return ret;

-	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
-					   false);
+	ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+					   private, false);
 	if (ret) {
-		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+		__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
 		return ret;
 	}
@@ -5318,9 +5323,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 }

 static int __netdev_adjacent_dev_link(struct net_device *dev,
-				      struct net_device *upper_dev)
+				      struct net_device *upper_dev,
+				      u16 ref_nr)
 {
-	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
 						&dev->all_adj_list.upper,
 						&upper_dev->all_adj_list.lower,
 						NULL, false);
@@ -5328,17 +5334,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,

 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 					       struct net_device *upper_dev,
+					       u16 ref_nr,
 					       struct list_head *up_list,
 					       struct list_head *down_list)
 {
-	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
+	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
 }

 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-					 struct net_device *upper_dev)
+					 struct net_device *upper_dev,
+					 u16 ref_nr)
 {
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
 					   &dev->all_adj_list.upper,
 					   &upper_dev->all_adj_list.lower);
 }
@@ -5347,17 +5355,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 						struct net_device *upper_dev,
 						void *private, bool master)
 {
-	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+	int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);

 	if (ret)
 		return ret;

-	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
 					       &dev->adj_list.upper,
 					       &upper_dev->adj_list.lower,
 					       private, master);
 	if (ret) {
-		__netdev_adjacent_dev_unlink(dev, upper_dev);
+		__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
 		return ret;
 	}
@@ -5367,8 +5375,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 						   struct net_device *upper_dev)
 {
-	__netdev_adjacent_dev_unlink(dev, upper_dev);
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
 					   &dev->adj_list.upper,
 					   &upper_dev->adj_list.lower);
 }
@@ -5420,7 +5428,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			pr_debug("Interlinking %s with %s, non-neighbour\n",
 				 i->dev->name, j->dev->name);
-			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+			ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
 			if (ret)
 				goto rollback_mesh;
 		}
@@ -5430,7 +5438,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		pr_debug("linking %s's upper device %s with %s\n",
 			 upper_dev->name, i->dev->name, dev->name);
-		ret = __netdev_adjacent_dev_link(dev, i->dev);
+		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
 		if (ret)
 			goto rollback_upper_mesh;
 	}
@@ -5439,7 +5447,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		pr_debug("linking %s's lower device %s with %s\n", dev->name,
 			 i->dev->name, upper_dev->name);
-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
 		if (ret)
 			goto rollback_lower_mesh;
 	}
@@ -5453,7 +5461,7 @@ rollback_lower_mesh:
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		if (i == to_i)
 			break;
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 	}

 	i = NULL;
@@ -5463,7 +5471,7 @@ rollback_upper_mesh:
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		if (i == to_i)
 			break;
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 	}

 	i = j = NULL;
@@ -5475,7 +5483,7 @@ rollback_mesh:
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			if (i == to_i && j == to_j)
 				break;
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 		}
 		if (i == to_i)
 			break;
@@ -5559,16 +5567,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);

 	/* remove also the devices itself from lower/upper device
 	 * list
 	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);

 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);

 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
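The u16 ref_nr threaded through these functions lets the all_adj_list link and unlink paths add or drop N references in one call instead of assuming one at a time, so the mesh operations on stacked devices stay balanced with what was originally inserted. A standalone toy model of the batched counter (hypothetical names, not kernel code):

/* Toy model of batched adjacency refcounts, not kernel code. */
#include <stdio.h>

struct adj {
	int ref_nr;
};

static void adj_insert(struct adj *a, int ref_nr)
{
	a->ref_nr += ref_nr;		/* was: a->ref_nr++ */
}

/* Returns 1 when the last reference went away and 'a' would be freed. */
static int adj_remove(struct adj *a, int ref_nr)
{
	if (a->ref_nr > ref_nr) {	/* was: a->ref_nr > 1 */
		a->ref_nr -= ref_nr;	/* was: a->ref_nr-- */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct adj a = { 0 };

	adj_insert(&a, 3);	/* link created while i->ref_nr was 3 */
	printf("freed=%d\n", adj_remove(&a, 3));	/* freed=1, balanced */
	return 0;
}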

@@ -215,8 +215,8 @@
 #define M_NETIF_RECEIVE 	1	/* Inject packets into stack */

 /* If lock -- protects updating of if_list */
-#define   if_lock(t)           spin_lock(&(t->if_lock));
-#define   if_unlock(t)           spin_unlock(&(t->if_lock));
+#define   if_lock(t)           mutex_lock(&(t->if_lock));
+#define   if_unlock(t)           mutex_unlock(&(t->if_lock));

 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -422,7 +422,7 @@ struct pktgen_net {
 };

 struct pktgen_thread {
-	spinlock_t if_lock;		/* for list of devices */
+	struct mutex if_lock;		/* for list of devices */
 	struct list_head if_list;	/* All device here */
 	struct list_head th_list;
 	struct task_struct *tsk;
@@ -2002,11 +2002,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
 	struct pktgen_thread *t;

+	mutex_lock(&pktgen_thread_lock);
+
 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
 		struct pktgen_dev *pkt_dev;

-		rcu_read_lock();
-		list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+		if_lock(t);
+		list_for_each_entry(pkt_dev, &t->if_list, list) {
 			if (pkt_dev->odev != dev)
 				continue;
@@ -2021,8 +2023,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 				      dev->name);
 			break;
 		}
-		rcu_read_unlock();
+		if_unlock(t);
 	}
+	mutex_unlock(&pktgen_thread_lock);
 }

 static int pktgen_device_event(struct notifier_block *unused,
@@ -2278,7 +2281,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)

 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
+	pkt_dev->pkt_overhead = 0;
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2769,13 +2772,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 }

 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
-					struct pktgen_dev *pkt_dev,
-					unsigned int extralen)
+					struct pktgen_dev *pkt_dev)
 {
+	unsigned int extralen = LL_RESERVED_SPACE(dev);
 	struct sk_buff *skb = NULL;
-	unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
-			    pkt_dev->pkt_overhead;
+	unsigned int size;

+	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
 	if (pkt_dev->flags & F_NODE) {
 		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();

@@ -2788,8 +2791,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
 		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}

+	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
 	if (likely(skb))
-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+		skb_reserve(skb, extralen - 16);

 	return skb;
 }
@@ -2822,16 +2826,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;

-	datalen = (odev->hard_header_len + 16) & ~0xf;
-
-	skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}

 	prefetchw(skb->data);
-	skb_reserve(skb, datalen);
+	skb_reserve(skb, 16);

 	/*  Reserve for ethernet and IP header  */
 	eth = (__u8 *) skb_push(skb, 14);
@@ -2951,7 +2953,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;

-	skb = pktgen_alloc_skb(odev, pkt_dev, 16);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
@@ -3727,7 +3729,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 		return -ENOMEM;
 	}

-	spin_lock_init(&t->if_lock);
+	mutex_init(&t->if_lock);
 	t->cpu = cpu;

 	INIT_LIST_HEAD(&t->if_list);

@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,

 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -1387,7 +1387,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);

-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }

 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-				  int offset)
+				  int tlen, int offset)
 {
 	__wsum csum = skb->csum;
@@ -106,7 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
 		return;

 	if (offset != 0)
-		csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+		csum = csum_sub(csum,
+				csum_partial(skb->data + tlen,
+					     offset, 0));

 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -152,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }

 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-			 int offset)
+			 int tlen, int offset)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
 	unsigned int flags = inet->cmsg_flags;
@@ -215,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
 	}

 	if (flags & IP_CMSG_CHECKSUM)
-		ip_cmsg_recv_checksum(msg, skb, offset);
+		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);

@@ -2192,7 +2192,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,

 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
-		   struct rtmsg *rtm, int nowait)
+		   struct rtmsg *rtm, int nowait, u32 portid)
 {
 	struct mfc_cache *cache;
 	struct mr_table *mrt;
@@ -2237,6 +2237,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 			return -ENOMEM;
 		}

+		NETLINK_CB(skb2).portid = portid;
 		skb_push(skb2, sizeof(struct iphdr));
 		skb_reset_network_header(skb2);
 		iph = ip_hdr(skb2);

@@ -2499,7 +2499,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
 			int err = ipmr_get_route(net, skb,
 						 fl4->saddr, fl4->daddr,
-						 r, nowait);
+						 r, nowait, portid);
+
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)

@@ -97,11 +97,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 		container_of(table->data, struct net, ipv4.ping_group_range.range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);

 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }

 /* Update system visible IP port range */
@@ -110,10 +110,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 	kgid_t *data = table->data;
 	struct net *net =
 		container_of(table->data, struct net, ipv4.ping_group_range.range);

-	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	write_seqlock(&net->ipv4.ping_group_range.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&net->ipv4.ip_local_ports.lock);
+	write_sequnlock(&net->ipv4.ping_group_range.lock);
 }

 /* Validate changes from /proc interface. */

@@ -2325,10 +2325,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
 		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 			 msg,
-			 &np->daddr, ntohs(inet->inet_dport),
+			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
 			 tp->snd_cwnd, tcp_left_out(tp),
 			 tp->snd_ssthresh, tp->prior_ssthresh,
 			 tp->packets_out);

@@ -1950,12 +1950,14 @@ static int tcp_mtu_probe(struct sock *sk)
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
 		copy = min_t(int, skb->len, probe_size - len);
-		if (nskb->ip_summed)
+		if (nskb->ip_summed) {
 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-		else
-			nskb->csum = skb_copy_and_csum_bits(skb, 0,
-							    skb_put(nskb, copy),
-							    copy, nskb->csum);
+		} else {
+			__wsum csum = skb_copy_and_csum_bits(skb, 0,
+							     skb_put(nskb, copy),
+							     copy, 0);
+			nskb->csum = csum_block_add(nskb->csum, csum, len);
+		}

 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
@@ -2569,7 +2571,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+		  sk->sk_sndbuf))
 		return -EAGAIN;

 	if (skb_still_in_host_queue(sk, skb))

@@ -1343,7 +1343,7 @@ try_again:
 		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);

 	err = copied;
 	if (flags & MSG_TRUNC)

@@ -339,8 +339,8 @@ unflush:
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+				  head, skb, uo_priv->offload);

 out_unlock:
 	rcu_read_unlock();

@@ -2943,7 +2943,7 @@ static void init_loopback(struct net_device *dev)
 				 * lo device down, release this obsolete dst and
 				 * reallocate a new router for ifa.
 				 */
-				if (sp_ifa->rt->dst.obsolete > 0) {
+				if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
 					ip6_rt_put(sp_ifa->rt);
 					sp_ifa->rt = NULL;
 				} else {

@@ -886,7 +886,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
 		encap_limit = t->parms.encap_limit;

 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = skb->protocol;

 	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);

@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,

 	skb_gro_postpull_rcsum(skb, iph, nlen);

-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

 out_unlock:
 	rcu_read_unlock();

@@ -246,6 +246,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(&any, local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_any(&t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@ -253,6 +254,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(remote, &any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    ipv6_addr_any(&t->parms.laddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}

@@ -2276,8 +2276,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	return 1;
 }

-int ip6mr_get_route(struct net *net,
-		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+		    int nowait, u32 portid)
 {
 	int err;
 	struct mr6_table *mrt;
@@ -2322,6 +2322,7 @@ int ip6mr_get_route(struct net *net,
 			return -ENOMEM;
 		}

+		NETLINK_CB(skb2).portid = portid;
 		skb_reset_transport_header(skb2);

 		skb_put(skb2, sizeof(struct ipv6hdr));

@@ -3128,7 +3128,9 @@ static int rt6_fill_node(struct net *net,
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-			int err = ip6mr_get_route(net, skb, rtm, nowait);
+			int err = ip6mr_get_route(net, skb, rtm, nowait,
+						  portid);
+
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)

@@ -1180,6 +1180,16 @@ out:
 	return NULL;
 }

+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1309,6 +1319,7 @@ ipv6_pktoptions:
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
 			skb_set_owner_r(opt_skb, sk);
+			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
 			__kfree_skb(opt_skb);
@@ -1342,15 +1353,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 	TCP_SKB_CB(skb)->sacked = 0;
 }

-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
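The reason tcp_v6_restore_cb() moves above its new caller: skb->cb is one shared scratch area, and tcp_v6_fill_cb() lays TCP's control block over the IPv6 one, so the saved inet6_skb_parm must be slid back to offset 0 before IPv6-side readers such as ip6_datagram_recv_specific_ctl() look at IP6CB again. A standalone toy model of that overlay (simplified layouts, not the real structs):

/* Toy model of the skb->cb overlay, not the real structs. */
#include <stdio.h>
#include <string.h>

struct sk_buff { unsigned char cb[48]; };

struct inet6_skb_parm { int iif; };	/* much simplified */

/* In 4.4, tcp_skb_cb keeps the saved IPv6 parm after its own fields,
 * so it no longer sits at offset 0 of skb->cb. */
struct tcp_skb_cb {
	unsigned int seq;
	unsigned int end_seq;
	struct inet6_skb_parm h6;
};

#define IP6CB(skb)	((struct inet6_skb_parm *)((skb)->cb))
#define TCP_SKB_CB(skb)	((struct tcp_skb_cb *)((skb)->cb))

/* Same move as tcp_v6_restore_cb(): slide the parm back to offset 0.
 * memmove(), not memcpy(), because the regions can overlap. */
static void restore_cb(struct sk_buff *skb)
{
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->h6,
		sizeof(struct inet6_skb_parm));
}

int main(void)
{
	struct sk_buff skb = { { 0 } };

	TCP_SKB_CB(&skb)->h6.iif = 7;	/* as tcp_v6_fill_cb() saved it */
	restore_cb(&skb);
	printf("iif at offset 0: %d\n", IP6CB(&skb)->iif);	/* 7 */
	return 0;
}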

@@ -498,7 +498,8 @@ try_again:

 	if (is_udp4) {
 		if (inet->cmsg_flags)
-			ip_cmsg_recv(msg, skb);
+			ip_cmsg_recv_offset(msg, skb,
+					    sizeof(struct udphdr), off);
 	} else {
 		if (np->rxopt.all)
 			ip6_datagram_recv_specific_ctl(sk, msg, skb);

@@ -2203,16 +2203,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
 		return RX_CONTINUE;

-	if (ieee80211_has_a4(hdr->frame_control) &&
-	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-	    !rx->sdata->u.vlan.sta)
-		return RX_DROP_UNUSABLE;
+	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+		switch (rx->sdata->vif.type) {
+		case NL80211_IFTYPE_AP_VLAN:
+			if (!rx->sdata->u.vlan.sta)
+				return RX_DROP_UNUSABLE;
+			break;
+		case NL80211_IFTYPE_STATION:
+			if (!rx->sdata->u.mgd.use_4addr)
+				return RX_DROP_UNUSABLE;
+			break;
+		default:
+			return RX_DROP_UNUSABLE;
+		}
+	}

-	if (is_multicast_ether_addr(hdr->addr1) &&
-	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-	      rx->sdata->u.vlan.sta) ||
-	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
-	      rx->sdata->u.mgd.use_4addr)))
+	if (is_multicast_ether_addr(hdr->addr1))
 		return RX_DROP_UNUSABLE;

 	skb->dev = dev;

@@ -2557,7 +2557,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	/* Record the max length of recvmsg() calls for future allocations */
 	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
 	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-				     16384);
+				     SKB_WITH_OVERHEAD(32768));

 	copied = data_skb->len;
 	if (len < copied) {
@@ -2810,14 +2810,13 @@ static int netlink_dump(struct sock *sk)
 	if (alloc_min_size < nlk->max_recvmsg_len) {
 		alloc_size = nlk->max_recvmsg_len;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL |
-					__GFP_NOWARN |
-					__GFP_NORETRY);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+					__GFP_NOWARN | __GFP_NORETRY);
 	}
 	if (!skb) {
 		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
 	}
 	if (!skb)
 		goto errout_skb;

@@ -249,7 +249,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 static int packet_direct_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	netdev_features_t features;
+	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;

@@ -257,9 +257,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
 		     !netif_carrier_ok(dev)))
 		goto drop;

-	features = netif_skb_features(skb);
-	if (skb_needs_linearize(skb, features) &&
-	    __skb_linearize(skb))
+	skb = validate_xmit_skb_list(skb, dev);
+	if (skb != orig_skb)
 		goto drop;

 	txq = skb_get_tx_queue(dev, skb);
@@ -279,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	return ret;
 drop:
 	atomic_long_inc(&dev->tx_dropped);
-	kfree_skb(skb);
+	kfree_skb_list(skb);
 	return NET_XMIT_DROP;
 }

@@ -3855,6 +3854,7 @@ static int packet_notifier(struct notifier_block *this,
 		}
 		if (msg == NETDEV_UNREGISTER) {
 			packet_cached_dev_reset(po);
+			fanout_release(sk);
 			po->ifindex = -1;
 			if (po->prot_hook.dev)
 				dev_put(po->prot_hook.dev);
dev_put(po->prot_hook.dev); dev_put(po->prot_hook.dev);
