This is the 4.4.108 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlpA+4sACgkQONu9yGCS
 aT4RphAAkoRI16GF7c2U1aLVGcYA5zezxnYEjtFXjoM7sLwmfEBeSTTR8OqCZGaa
 hLvhrqCbF6qjgu0dKAKaLasnoUk+/ZEpxSE0JAlQ0ZdmCP9YH+Sd2PqjD48QGJ80
 O2JktsYT3MjaNKFHNeSElf2ffk3mDzRpeeHEtSduJE3/Pqvoch2qV36qJWstOpLR
 QKEQCKVP9Xa71V5Zu5tDAMFI0IQ09HRjBjyjlAxnJL+/wgYM/NTraZ1/3Ju8nGDU
 Ohamr80SdIZ0+36Gl9mFTDQRl5yuh2SllqaD4Hq4P41HPIWaXuTuKQwA0LD7U8U6
 N7I5byN7QVmOszERc0jzkQPd4aN1FWfDqAR5i6S+fDbianDiHvBNsybihayWqzay
 MgX3VnzhFrnkh5UcijKOjsRHp/CkDIAfUseb5dFekAjuctWtNK7qkE+bGySZJ9ET
 XO2b8xlRz/nYuWodsAXT6FhVaVd45ba+VR/nDwSzijf+7SY2Ub9/lwChePbI8VJj
 vSiBURRtZhEgwXXJYH3JT4L+MSqfKA+1Jd+G7BqvZaSQ6S8RLvts/JZAX1nGs0JK
 B2a84CD0lXd5RcMBRFXkfzCEZDA1hB/oLpmVVXxNROseSlSc/ExQ3xZ9UPXlWCnN
 dB7XCV5GoV9Fqx5FX0lg6eEkNzUrkFV/My/FKaJtZR8U0TJB1nk=
 =uslW
 -----END PGP SIGNATURE-----

Merge 4.4.108 into android-4.4

Changes in 4.4.108
	arm64: Initialise high_memory global variable earlier
	cxl: Check if vphb exists before iterating over AFU devices
	x86/mm: Add INVPCID helpers
	x86/mm: Fix INVPCID asm constraint
	x86/mm: Add a 'noinvpcid' boot option to turn off INVPCID
	x86/mm: If INVPCID is available, use it to flush global mappings
	mm/rmap: batched invalidations should use existing api
	mm/mmu_context, sched/core: Fix mmu_context.h assumption
	sched/core: Add switch_mm_irqs_off() and use it in the scheduler
	x86/mm: Build arch/x86/mm/tlb.c even on !SMP
	x86/mm, sched/core: Uninline switch_mm()
	x86/mm, sched/core: Turn off IRQs in switch_mm()
	ARM: Hide finish_arch_post_lock_switch() from modules
	sched/core: Idle_task_exit() shouldn't use switch_mm_irqs_off()
	x86/irq: Do not substract irq_tlb_count from irq_call_count
	ALSA: hda - add support for docking station for HP 820 G2
	ALSA: hda - add support for docking station for HP 840 G3
	arm: kprobes: Fix the return address of multiple kretprobes
	arm: kprobes: Align stack to 8-bytes in test code
	cpuidle: Validate cpu_dev in cpuidle_add_sysfs()
	r8152: fix the list rx_done may be used without initialization
	crypto: deadlock between crypto_alg_sem/rtnl_mutex/genl_mutex
	sch_dsmark: fix invalid skb_cow() usage
	bna: integer overflow bug in debugfs
	net: qmi_wwan: Add USB IDs for MDM6600 modem on Motorola Droid 4
	usb: gadget: f_uvc: Sanity check wMaxPacketSize for SuperSpeed
	usb: gadget: udc: remove pointer dereference after free
	netfilter: nfnl_cthelper: fix runtime expectation policy updates
	netfilter: nfnl_cthelper: Fix memory leak
	inet: frag: release spinlock before calling icmp_send()
	pinctrl: st: add irq_request/release_resources callbacks
	scsi: lpfc: Fix PT2PT PRLI reject
	KVM: x86: correct async page present tracepoint
	KVM: VMX: Fix enable VPID conditions
	ARM: dts: ti: fix PCI bus dtc warnings
	hwmon: (asus_atk0110) fix uninitialized data access
	HID: xinmo: fix for out of range for THT 2P arcade controller.
	r8152: prevent the driver from transmitting packets with carrier off
	s390/qeth: no ETH header for outbound AF_IUCV
	bna: avoid writing uninitialized data into hw registers
	net: Do not allow negative values for busy_read and busy_poll sysctl interfaces
	i40e: Do not enable NAPI on q_vectors that have no rings
	RDMA/iser: Fix possible mr leak on device removal event
	irda: vlsi_ir: fix check for DMA mapping errors
	netfilter: nfnl_cthelper: fix a race when walk the nf_ct_helper_hash table
	netfilter: nf_nat_snmp: Fix panic when snmp_trap_helper fails to register
	ARM: dts: am335x-evmsk: adjust mmc2 param to allow suspend
	KVM: pci-assign: do not map smm memory slot pages in vt-d page tables
	isdn: kcapi: avoid uninitialized data
	xhci: plat: Register shutdown for xhci_plat
	netfilter: nfnetlink_queue: fix secctx memory leak
	ARM: dma-mapping: disallow dma_get_sgtable() for non-kernel managed memory
	cpuidle: powernv: Pass correct drv->cpumask for registration
	bnxt_en: Fix NULL pointer dereference in reopen failure path
	backlight: pwm_bl: Fix overflow condition
	crypto: crypto4xx - increase context and scatter ring buffer elements
	rtc: pl031: make interrupt optional
	net: phy: at803x: Change error to EINVAL for invalid MAC
	PCI: Avoid bus reset if bridge itself is broken
	scsi: cxgb4i: fix Tx skb leak
	scsi: mpt3sas: Fix IO error occurs on pulling out a drive from RAID1 volume created on two SATA drive
	PCI: Create SR-IOV virtfn/physfn links before attaching driver
	igb: check memory allocation failure
	ixgbe: fix use of uninitialized padding
	PCI/AER: Report non-fatal errors only to the affected endpoint
	scsi: lpfc: Fix secure firmware updates
	scsi: lpfc: PLOGI failures during NPIV testing
	fm10k: ensure we process SM mbx when processing VF mbx
	tcp: fix under-evaluated ssthresh in TCP Vegas
	rtc: set the alarm to the next expiring timer
	cpuidle: fix broadcast control when broadcast can not be entered
	thermal: hisilicon: Handle return value of clk_prepare_enable
	MIPS: math-emu: Fix final emulation phase for certain instructions
	Revert "Bluetooth: btusb: driver to enable the usb-wakeup feature"
	ALSA: hda - Clear the leftover component assignment at snd_hdac_i915_exit()
	ALSA: hda - Degrade i915 binding failure message
	ALSA: hda - Fix yet another i915 pointer leftover in error path
	alpha: fix build failures
	Linux 4.4.108

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 55b3b8c2b5 (Greg Kroah-Hartman, 2017-12-27 13:36:00 +01:00)
79 changed files with 797 additions and 366 deletions

@@ -2525,6 +2525,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nointroute [IA-64]
noinvpcid [X86] Disable the INVPCID cpu feature.
nojitter [IA-64] Disables jitter checking for ITC timers.
no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 107
SUBLEVEL = 108
EXTRAVERSION =
NAME = Blurry Fish Butt

@@ -7,6 +7,7 @@
* Copyright (C) 1996, Linus Torvalds
*/
#include <linux/sched.h>
#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

@@ -668,6 +668,7 @@
ti,non-removable;
bus-width = <4>;
cap-power-off-card;
keep-power-in-suspend;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;

@@ -227,6 +227,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
ti,hwmods = "pcie1";
@@ -262,6 +263,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
ti,hwmods = "pcie2";

@@ -61,6 +61,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
cpu_switch_mm(mm->pgd, mm);
}
#ifndef MODULE
#define finish_arch_post_lock_switch \
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
@@ -82,6 +83,7 @@ static inline void finish_arch_post_lock_switch(void)
preempt_enable_no_resched();
}
}
#endif /* !MODULE */
#endif /* CONFIG_MMU */

@@ -774,13 +774,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
/*
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
* that the intention is to allow exporting memory allocated via the
* coherent DMA APIs through the dma_buf API, which only accepts a
* scattertable. This presents a couple of problems:
* 1. Not all memory allocated via the coherent DMA APIs is backed by
* a struct page
* 2. Passing coherent DMA memory into the streaming APIs is not allowed
* as we will try to flush the memory through a different alias to that
* actually being used (and the flushes are redundant.)
*/
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
struct dma_attrs *attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
unsigned long pfn = dma_to_pfn(dev, handle);
struct page *page;
int ret;
/* If the PFN is not valid, we do not have a struct page */
if (!pfn_valid(pfn))
return -ENXIO;
page = pfn_to_page(pfn);
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;

@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -455,15 +456,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
@@ -475,6 +468,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {

@@ -976,7 +976,10 @@ static void coverage_end(void)
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
"stmdb sp!, {r4-r11} \n\t"
"mov r2, sp \n\t"
"bic r3, r2, #7 \n\t"
"mov sp, r3 \n\t"
"stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"
@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}
@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"bx r0 \n\t"
);
}

@@ -249,6 +249,7 @@ void __init arm64_memblock_init(void)
arm64_dma_phys_limit = max_zone_dma_phys();
else
arm64_dma_phys_limit = PHYS_MASK + 1;
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);
memblock_allow_resize();
@@ -273,7 +274,6 @@ void __init bootmem_init(void)
sparse_init();
zone_sizes_init(min, max);
high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
max_pfn = max_low_pfn = max;
}

@@ -1777,7 +1777,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_maddf(fd, fs, ft);
break;
goto copcsr;
}
case fmsubf_op: {
@@ -1790,7 +1790,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_msubf(fd, fs, ft);
break;
goto copcsr;
}
case frint_op: {
@@ -1814,7 +1814,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_2008class(fs);
rfmt = w_fmt;
break;
goto copcsr;
}
case fmin_op: {
@@ -1826,7 +1826,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmin(fs, ft);
break;
goto copcsr;
}
case fmina_op: {
@@ -1838,7 +1838,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmina(fs, ft);
break;
goto copcsr;
}
case fmax_op: {
@@ -1850,7 +1850,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmax(fs, ft);
break;
goto copcsr;
}
case fmaxa_op: {
@@ -1862,7 +1862,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmaxa(fs, ft);
break;
goto copcsr;
}
case fabs_op:
@@ -2095,7 +2095,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_maddf(fd, fs, ft);
break;
goto copcsr;
}
case fmsubf_op: {
@@ -2108,7 +2108,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_msubf(fd, fs, ft);
break;
goto copcsr;
}
case frint_op: {
@@ -2132,7 +2132,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754dp_2008class(fs);
rfmt = w_fmt;
break;
goto copcsr;
}
case fmin_op: {
@@ -2144,7 +2144,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmin(fs, ft);
break;
goto copcsr;
}
case fmina_op: {
@@ -2156,7 +2156,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmina(fs, ft);
break;
goto copcsr;
}
case fmax_op: {
@@ -2168,7 +2168,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmax(fs, ft);
break;
goto copcsr;
}
case fmaxa_op: {
@@ -2180,7 +2180,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmaxa(fs, ft);
break;
goto copcsr;
}
case fabs_op:

@@ -22,10 +22,6 @@ typedef struct {
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
/*
* irq_tlb_count is double-counted in irq_call_count, so it must be
* subtracted from irq_call_count when displaying irq_call_count
*/
unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR

@@ -104,103 +104,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* Re-load page tables.
*
* This logic has an ordering constraint:
*
* CPU 0: Write to a PTE for 'next'
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
* CPU 1: set bit 1 in next's mm_cpumask
* CPU 1: load from the PTE that CPU 0 writes (implicit)
*
* We need to prevent an outcome in which CPU 1 observes
* the new PTE value and CPU 0 observes bit 1 clear in
* mm_cpumask. (If that occurs, then the IPI will never
* be sent, and CPU 0's TLB will contain a stale entry.)
*
* The bad outcome can occur if either CPU's load is
* reordered before that CPU's store, so both CPUs must
* execute full barriers to prevent this from happening.
*
* Thus, switch_mm needs a full barrier between the
* store to mm_cpumask and any operation that could load
* from next->pgd. TLB fills are special and can happen
* due to instruction fetches or for no reason at all,
* and neither LOCK nor MFENCE orders them.
* Fortunately, load_cr3() is serializing and gives the
* ordering guarantee we need.
*
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
/* Load per-mm CR4 state */
load_mm_cr4(next);
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* Load the LDT, if the LDT is different.
*
* It's possible that prev->context.ldt doesn't match
* the LDT register. This can happen if leave_mm(prev)
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_mm_ldt(next);
}
}
#endif
}
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off
#define activate_mm(prev, next) \
do { \

@@ -7,6 +7,54 @@
#include <asm/processor.h>
#include <asm/special_insns.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
{
struct { u64 d[2]; } desc = { { pcid, addr } };
/*
* The memory clobber is because the whole point is to invalidate
* stale TLB entries and, especially if we're flushing global
* mappings, we don't want the compiler to reorder any subsequent
* memory accesses before the TLB flush.
*
* The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
* invpcid (%rcx), %rax in long mode.
*/
asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
: : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
#define INVPCID_TYPE_INDIV_ADDR 0
#define INVPCID_TYPE_SINGLE_CTXT 1
#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
#define INVPCID_TYPE_ALL_NON_GLOBAL 3
/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
unsigned long addr)
{
__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
@@ -111,6 +159,15 @@ static inline void __native_flush_tlb_global(void)
{
unsigned long flags;
if (static_cpu_has(X86_FEATURE_INVPCID)) {
/*
* Using INVPCID is considerably faster than a pair of writes
* to CR4 sandwiched inside an IRQ flag save/restore.
*/
invpcid_flush_all();
return;
}
/*
* Read-modify-write to CR4 - protect it from preemption and
* from interrupts. (Use the raw variant because this code can
@@ -268,12 +325,6 @@ static inline void reset_lazy_tlbstate(void)
#endif /* SMP */
/* Not inlined due to inc_irq_stat not being defined yet */
#define flush_tlb_local() { \
inc_irq_stat(irq_tlb_count); \
local_flush_tlb(); \
}
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end) \
native_flush_tlb_others(mask, mm, start, end)

@@ -162,6 +162,22 @@ static int __init x86_mpx_setup(char *s)
}
__setup("nompx", x86_mpx_setup);
static int __init x86_noinvpcid_setup(char *s)
{
/* noinvpcid doesn't accept parameters */
if (s)
return -EINVAL;
/* do not emit a message if the feature is not present */
if (!boot_cpu_has(X86_FEATURE_INVPCID))
return 0;
setup_clear_cpu_cap(X86_FEATURE_INVPCID);
pr_info("noinvpcid: INVPCID feature disabled\n");
return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);
#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

@@ -102,8 +102,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
irq_stats(j)->irq_tlb_count);
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_puts(p, " Function call interrupts\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)

@@ -1107,6 +1107,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}
static inline bool cpu_has_vmx_invvpid(void)
{
return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
}
static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -6199,8 +6204,10 @@ static __init int hardware_setup(void)
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
if (!cpu_has_vmx_vpid())
if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)

@@ -8230,11 +8230,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
{
struct x86_exception fault;
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {

@@ -2,7 +2,7 @@
KCOV_INSTRUMENT_tlb.o := n
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o physaddr.o gup.o setup_nx.o
pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
@@ -12,7 +12,6 @@ CFLAGS_setup_nx.o := $(nostackp)
CFLAGS_fault.o := -I$(src)/../include/asm/trace
obj-$(CONFIG_X86_PAT) += pat_rbtree.o
obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o

@@ -28,6 +28,8 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
#ifdef CONFIG_SMP
struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -57,6 +59,118 @@ void leave_mm(int cpu)
}
EXPORT_SYMBOL_GPL(leave_mm);
#endif /* CONFIG_SMP */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* Re-load page tables.
*
* This logic has an ordering constraint:
*
* CPU 0: Write to a PTE for 'next'
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
* CPU 1: set bit 1 in next's mm_cpumask
* CPU 1: load from the PTE that CPU 0 writes (implicit)
*
* We need to prevent an outcome in which CPU 1 observes
* the new PTE value and CPU 0 observes bit 1 clear in
* mm_cpumask. (If that occurs, then the IPI will never
* be sent, and CPU 0's TLB will contain a stale entry.)
*
* The bad outcome can occur if either CPU's load is
* reordered before that CPU's store, so both CPUs must
* execute full barriers to prevent this from happening.
*
* Thus, switch_mm needs a full barrier between the
* store to mm_cpumask and any operation that could load
* from next->pgd. TLB fills are special and can happen
* due to instruction fetches or for no reason at all,
* and neither LOCK nor MFENCE orders them.
* Fortunately, load_cr3() is serializing and gives the
* ordering guarantee we need.
*
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
/* Load per-mm CR4 state */
load_mm_cr4(next);
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
* Load the LDT, if the LDT is different.
*
* It's possible that prev->context.ldt doesn't match
* the LDT register. This can happen if leave_mm(prev)
* was called and then modify_ldt changed
* prev->context.ldt but suppressed an IPI to this CPU.
* In this case, prev->context.ldt != NULL, because we
* never set context.ldt to NULL while the mm still
* exists. That means that next->context.ldt !=
* prev->context.ldt, because mms never share an LDT.
*/
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
#endif
}
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
/*
* On established mms, the mm_cpumask is only changed
* from irq context, from ptep_clear_flush() while in
* lazy tlb mode, and here. Irqs are blocked during
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_mm_ldt(next);
}
}
#endif
}
#ifdef CONFIG_SMP
/*
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
@@ -104,7 +218,7 @@ static void flush_tlb_func(void *info)
inc_irq_stat(irq_tlb_count);
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -351,3 +465,5 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
#endif /* CONFIG_SMP */

@@ -1050,10 +1050,6 @@ static int btusb_open(struct hci_dev *hdev)
return err;
data->intf->needs_remote_wakeup = 1;
/* device specific wakeup source enabled and required for USB
* remote wakeup while host is suspended
*/
device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@@ -1117,7 +1113,6 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:

@@ -160,6 +160,24 @@ static int powernv_cpuidle_driver_init(void)
drv->state_count += 1;
}
/*
* On the PowerNV platform cpu_present may be less than cpu_possible in
* cases when firmware detects the CPU, but it is not available to the
* OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
* run time and hence cpu_devices are not created for those CPUs by the
* generic topology_init().
*
* drv->cpumask defaults to cpu_possible_mask in
* __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
* cpu_devices are not created for CPUs in cpu_possible_mask that
* cannot be hot-added later at run time.
*
* Trying cpuidle_register_device() on a CPU without a cpu_device is
* incorrect, so pass a correct CPU mask to the generic cpuidle driver.
*/
drv->cpumask = (struct cpumask *)cpu_present_mask;
return 0;
}

@@ -189,6 +189,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
return -EBUSY;
}
target_state = &drv->states[index];
broadcast = false;
}
/* Take note of the planned idle state. */

@@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
/*
* Return if cpu_device is not setup for this CPU.
*
* This could happen if the arch did not set up cpu_device
* since this CPU is not in cpu_present mask and the
* driver did not send a correct CPU mask during registration.
* Without this check we would end up passing bogus
* value for &cpu_dev->kobj in kobject_init_and_add()
*/
if (!cpu_dev)
return -ENODEV;
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;

@@ -32,12 +32,12 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
#define PPC4XX_LAST_PD 63
#define PPC4XX_NUM_PD 64
#define PPC4XX_LAST_GD 1023
#define PPC4XX_NUM_PD 256
#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
#define PPC4XX_NUM_GD 1024
#define PPC4XX_LAST_SD 63
#define PPC4XX_NUM_SD 64
#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
#define PPC4XX_NUM_SD 256
#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE 2048
#define PD_ENTRY_INUSE 1

@@ -2053,6 +2053,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },

@@ -1021,6 +1021,7 @@
#define USB_VENDOR_ID_XIN_MO 0x16c0
#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
#define USB_VENDOR_ID_XIROKU 0x1477
#define USB_DEVICE_ID_XIROKU_SPX 0x1006

@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id xinmo_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
{ }
};

@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
else
err = atk_read_value_new(sensor, value);
if (err)
return err;
sensor->is_valid = true;
sensor->last_updated = jiffies;
sensor->cached_value = *value;

@@ -450,6 +450,7 @@ struct iser_fr_desc {
struct list_head list;
struct iser_reg_resources rsc;
struct iser_pi_context *pi_ctx;
struct list_head all_list;
};
/**
@ -463,6 +464,7 @@ struct iser_fr_pool {
struct list_head list;
spinlock_t lock;
int size;
struct list_head all_list;
};
/**

@@ -405,6 +405,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
int i, ret;
INIT_LIST_HEAD(&fr_pool->list);
INIT_LIST_HEAD(&fr_pool->all_list);
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
@@ -416,6 +417,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
}
list_add_tail(&desc->list, &fr_pool->list);
list_add_tail(&desc->all_list, &fr_pool->all_list);
fr_pool->size++;
}
@@ -435,13 +437,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
struct iser_fr_desc *desc, *tmp;
int i = 0;
if (list_empty(&fr_pool->list))
if (list_empty(&fr_pool->all_list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
list_del(&desc->list);
list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
list_del(&desc->all_list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
iser_free_pi_ctx(desc->pi_ctx);
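
The leak closed above comes from freeing only the free list: descriptors checked out for I/O when the device goes away are not on fr_pool->list and were never visited. The shape of the fix, as a minimal sketch (the names desc and pool_destroy are illustrative, not from the driver):

/* Keep every allocation on a second list so teardown can free
 * objects that are currently off the free pool. */
struct desc {
	struct list_head list;		/* free pool; unlinked while in use */
	struct list_head all_list;	/* linked from allocation to free */
};

static void pool_destroy(struct list_head *all)
{
	struct desc *d, *tmp;

	list_for_each_entry_safe(d, tmp, all, all_list) {
		list_del(&d->all_list);
		kfree(d);
	}
}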

@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
sizeof(avmb1_carddef))))
return -EFAULT;
cdef.cardtype = AVM_CARDTYPE_B1;
cdef.cardnr = 0;
} else {
if ((retval = copy_from_user(&cdef, data,
sizeof(avmb1_extcarddef))))

@@ -1329,6 +1329,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
/* There should only be one entry, but go through the list
* anyway
*/
if (afu->phb == NULL)
return result;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
if (!afu_dev->driver)
continue;
@@ -1369,6 +1372,10 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
/*
* Tell the AFU drivers; but we don't care what they
* say, we're going away.
*/
cxl_vphb_error_detected(afu, state);
}
return PCI_ERS_RESULT_DISCONNECT;
@@ -1492,6 +1499,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
if (cxl_afu_select_best_mode(afu))
goto err;
if (afu->phb == NULL)
continue;
cxl_pci_vphb_reconfigure(afu);
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
@@ -1556,6 +1566,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
if (afu->phb == NULL)
continue;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
if (afu_dev->driver && afu_dev->driver->err_handler &&
afu_dev->driver->err_handler->resume)

@@ -2014,6 +2014,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
return 0;
}
static void bnxt_init_cp_rings(struct bnxt *bp)
{
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
}
}
static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
@@ -3977,6 +3989,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
bnxt_init_cp_rings(bp);
bnxt_init_rx_rings(bp);
bnxt_init_tx_rings(bp);
bnxt_init_ring_grps(bp, irq_re_init);

@@ -1930,13 +1930,13 @@ static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
struct bfi_ioc_ctrl_req enable_req;
struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = htons(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = ntohl(tv.tv_sec);
enable_req.rsvd = htons(0);
/* overflow in 2106 */
enable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
disable_req.clscode = htons(ioc->clscode);
disable_req.rsvd = htons(0);
/* overflow in 2106 */
disable_req.tv_sec = ntohl(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
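
On the two "overflow in 2106" notes above: the tv_sec field sent to the firmware is a 32-bit count of seconds since the Unix epoch, and 2^32 seconds is roughly 136 years, so the value wraps in the year 2106. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long wrap = 1ULL << 32;		/* 2^32 seconds */
	unsigned long long per_year = 31556952ULL;	/* 365.2425 days */

	/* prints ~136; 1970 + 136 = 2106 */
	printf("wraps after ~%llu years\n", wrap / per_year);
	return 0;
}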

@@ -324,7 +324,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
if (rc < 2 || len > UINT_MAX >> 2) {
netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
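
The added len > UINT_MAX >> 2 bound matters because the word count is later scaled to a byte count (len << 2); without the check the multiply wraps and far less memory is allocated than intended. A minimal standalone illustration (not driver code):

unsigned int len = 0x40000001u;	/* just above UINT_MAX >> 2 */
unsigned int bytes = len << 2;	/* wraps to 4 instead of ~4 GiB */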

@@ -126,6 +126,9 @@ process_mbx:
struct fm10k_mbx_info *mbx = &vf_info->mbx;
u16 glort = vf_info->glort;
/* process the SM mailbox first to drain outgoing messages */
hw->mbx.ops.process(hw, &hw->mbx);
/* verify port mapping is valid, if not reset port */
if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
hw->iov.ops.reset_lport(hw, vf_info);

@@ -4201,8 +4201,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_enable(&vsi->q_vectors[q_idx]->napi);
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
napi_enable(&q_vector->napi);
}
}
/**
@@ -4216,8 +4220,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
if (!vsi->netdev)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
napi_disable(&vsi->q_vectors[q_idx]->napi);
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
napi_disable(&q_vector->napi);
}
}
/**

@@ -3005,6 +3005,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
GFP_ATOMIC);
if (!adapter->shadow_vfta)
return -ENOMEM;
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {

@@ -3620,10 +3620,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,

@@ -564,6 +564,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/* convert offset from words to bytes */
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
buffer.pad2 = 0;
buffer.pad3 = 0;
status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
sizeof(buffer),

@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
memset(rd, 0, sizeof(*rd));
rd->hw = hwmap + i;
rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
if (rd->buf == NULL ||
!(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
if (rd->buf)
busaddr = pci_map_single(pdev, rd->buf, len, dir);
if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
if (rd->buf) {
net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
__func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
rd = r->rd + j;
busaddr = rd_get_addr(rd);
rd_set_addr_status(rd, 0, 0);
if (busaddr)
pci_unmap_single(pdev, busaddr, len, dir);
pci_unmap_single(pdev, busaddr, len, dir);
kfree(rd->buf);
rd->buf = NULL;
}
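
The rule behind this fix: a DMA handle of zero can be a perfectly valid mapping on some platforms, so failure must be detected with the mapping-error helper rather than with !busaddr. A minimal sketch against the modern DMA API (the legacy pci_* wrappers used above behave the same way):

dma_addr_t addr;

addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr)) {
	/* no mapping was created; do not dma_unmap_single() here */
	kfree(buf);
	return -ENOMEM;
}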

@@ -105,7 +105,7 @@ static int at803x_set_wol(struct phy_device *phydev,
mac = (const u8 *) ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return -EFAULT;
return -EINVAL;
for (i = 0; i < 3; i++) {
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,

@@ -410,6 +410,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Motorola Mapphone devices with MDM6600 */
USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
},
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */

@@ -1207,6 +1207,7 @@ static void intr_callback(struct urb *urb)
}
} else {
if (netif_carrier_ok(tp->netdev)) {
netif_stop_queue(tp->netdev);
set_bit(RTL8152_LINK_CHG, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
@ -1277,6 +1278,7 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
INIT_LIST_HEAD(&tp->tx_free);
INIT_LIST_HEAD(&tp->rx_done);
skb_queue_head_init(&tp->tx_queue);
skb_queue_head_init(&tp->rx_queue);
@@ -3000,6 +3002,9 @@ static void set_carrier(struct r8152 *tp)
napi_enable(&tp->napi);
netif_wake_queue(netdev);
netif_info(tp, link, netdev, "carrier on\n");
} else if (netif_queue_stopped(netdev) &&
skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
netif_wake_queue(netdev);
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3560,8 +3565,18 @@ static int rtl8152_resume(struct usb_interface *intf)
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
if (netif_carrier_ok(tp->netdev)) {
if (rtl8152_get_speed(tp) & LINK_STATUS) {
rtl_start_rx(tp);
} else {
netif_carrier_off(tp->netdev);
tp->rtl_ops.disable(tp);
netif_info(tp, link, tp->netdev,
"linking down\n");
}
}
napi_enable(&tp->napi);
} else {
tp->rtl_ops.up(tp);

@@ -161,7 +161,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
@@ -172,6 +171,8 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
pci_bus_add_device(virtfn);
return 0;
failed2:

@@ -3850,6 +3850,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
{
struct pci_dev *dev;
if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
return false;
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resetable(dev->subordinate)))

@@ -388,7 +388,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
*/
pci_walk_bus(dev->bus, cb, &result_data);
if (state == pci_channel_io_normal)
/*
* the error is non fatal so the bus is ok, just invoke
* the callback for the function that logged the error.
*/
cb(dev, &result_data);
else
pci_walk_bus(dev->bus, cb, &result_data);
}
return result_data.result;

@@ -1338,6 +1338,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
}
static int st_gpio_irq_request_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
st_gpio_direction_input(gc, d->hwirq);
return gpiochip_lock_as_irq(gc, d->hwirq);
}
static void st_gpio_irq_release_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
gpiochip_unlock_as_irq(gc, d->hwirq);
}
static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1493,12 +1509,14 @@ static struct gpio_chip st_gpio_template = {
};
static struct irq_chip st_gpio_irqchip = {
.name = "GPIO",
.irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
.flags = IRQCHIP_SKIP_SET_WAKE,
.name = "GPIO",
.irq_request_resources = st_gpio_irq_request_resources,
.irq_release_resources = st_gpio_irq_release_resources,
.irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,

@@ -764,7 +764,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
if (!next) {
if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);

@@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev)
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
free_irq(adev->irq[0], ldata);
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
iounmap(ldata->base);
kfree(ldata);
@@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_rtc;
}
if (request_irq(adev->irq[0], pl031_interrupt,
vendor->irqflags, "rtc-pl031", ldata)) {
ret = -EIO;
goto out_no_irq;
if (adev->irq[0]) {
ret = request_irq(adev->irq[0], pl031_interrupt,
vendor->irqflags, "rtc-pl031", ldata);
if (ret)
goto out_no_irq;
dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
return 0;
out_no_irq:

@@ -2680,17 +2680,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
char daddr[16];
struct af_iucv_trans_hdr *iucv_hdr;
skb_pull(skb, 14);
card->dev->header_ops->create(skb, card->dev, 0,
card->dev->dev_addr, card->dev->dev_addr,
card->dev->addr_len);
skb_pull(skb, 14);
iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.ext_flags = 0;
hdr->hdr.l3.length = skb->len;
hdr->hdr.l3.length = skb->len - ETH_HLEN;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
@@ -2873,10 +2869,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
(skb_shinfo(skb)->nr_frags == 0)) {
new_skb = skb;
if (new_skb->protocol == ETH_P_AF_IUCV)
data_offset = 0;
else
data_offset = ETH_HLEN;
data_offset = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;

@@ -1339,6 +1339,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_free_cpl_skbs(csk);
cxgbi_sock_purge_write_queue(csk);
if (csk->wr_cred != csk->wr_max_cred) {
cxgbi_sock_purge_wr_queue(csk);
cxgbi_sock_reset_wr_list(csk);

@@ -7491,7 +7491,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLI++;
if (vport->port_state < LPFC_DISC_AUTH) {
if ((vport->port_state < LPFC_DISC_AUTH) &&
(vport->fc_flag & FC_FABRIC)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;

@@ -4777,7 +4777,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
phba->sli_rev != LPFC_SLI_REV4) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/

@@ -3180,7 +3180,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
struct mbox_header header;
union {

@@ -4588,6 +4588,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
scmd->result = DID_RESET << 16;
break;
} else if ((scmd->device->channel == RAID_CHANNEL) &&
(scsi_state == (MPI2_SCSI_STATE_TERMINATED |
MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
scmd->result = DID_RESET << 16;
break;
}
scmd->result = DID_SOFT_ERROR << 16;
break;

@@ -389,8 +389,11 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
int ret;
clk_prepare_enable(data->clk);
ret = clk_prepare_enable(data->clk);
if (ret)
return ret;
data->irq_enabled = true;
hisi_thermal_enable_bind_irq_sensor(data);

@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
if (opts->streaming_maxburst &&
(opts->streaming_maxpacket % 1024) != 0) {
opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
INFO(cdev, "overriding streaming_maxpacket to %d\n",
opts->streaming_maxpacket);
}
/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
* module parameters.
*

@@ -1534,7 +1534,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
pci_pool_free(dev->data_requests, td, addr);
td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;

@@ -284,6 +284,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,

@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
int duty_cycle;
u64 duty_cycle;
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
duty_cycle *= pb->period - lth;
do_div(duty_cycle, pb->scale);
return duty_cycle + lth;
}
static int pwm_backlight_update_status(struct backlight_device *bl)
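
The switch to u64 with do_div() is needed because the intermediate product can exceed 32 bits: a brightness level of 1000 against a 10 ms PWM period (10,000,000 ns), for instance, gives 10^10, well past INT_MAX. A standalone illustration with hypothetical values:

unsigned long long level = 1000;	/* pb->levels[brightness] */
unsigned long long period = 10000000;	/* pb->period, in ns */

/* 10,000,000,000 needs 34 bits; the old 32-bit int math overflowed
 * before the division by pb->scale could bring it back in range */
unsigned long long product = level * period;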

@@ -1,9 +1,16 @@
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H
#include <asm/mmu_context.h>
struct mm_struct;
void use_mm(struct mm_struct *mm);
void unuse_mm(struct mm_struct *mm);
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
#endif
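
Read together with the x86 and scheduler hunks above, the override pattern works as follows; a sketch, with arch/foo as a placeholder:

/* arch/foo/include/asm/mmu_context.h: an architecture that cares
 * about IRQ state supplies the function plus the matching define */
extern void switch_mm_irqs_off(struct mm_struct *prev,
			       struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

/* Generic code such as context_switch() just calls the name; on every
 * other architecture the fallback above resolves it to switch_mm(): */
switch_mm_irqs_off(oldmm, mm, next);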

@@ -32,7 +32,7 @@
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
@ -2781,7 +2781,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm(oldmm, mm, next);
switch_mm_irqs_off(oldmm, mm, next);
if (!prev->mm) {
prev->active_mm = NULL;

@@ -4,9 +4,9 @@
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/mmu_context.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

@@ -587,19 +587,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
}
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
static void percpu_flush_tlb_batch_pages(void *data)
{
/*
* All TLB entries are flushed on the assumption that it is
* cheaper to flush all TLBs and let them be refilled than
* flushing individual PFNs. Note that we do not track mm's
* to flush as that might simply be multiple full TLB flushes
* for no gain.
*/
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
flush_tlb_local();
}
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
* important if a PTE was dirty when it was unmapped that it's flushed
@ -616,15 +603,14 @@ void try_to_unmap_flush(void)
cpu = get_cpu();
trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
smp_call_function_many(&tlb_ubc->cpumask,
percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
}
if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
cpumask_clear(&tlb_ubc->cpumask);
tlb_ubc->flush_required = false;
tlb_ubc->writable = false;

@@ -360,14 +360,16 @@ static struct ctl_table net_core_table[] = {
.data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "busy_read",
.data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
#endif
#ifdef CONFIG_NET_SCHED

@@ -200,6 +200,7 @@ static void ip_expire(unsigned long arg)
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
rcu_read_lock();
spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -209,7 +210,7 @@ static void ip_expire(unsigned long arg)
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
if (!inet_frag_evicting(&qp->q)) {
struct sk_buff *head = qp->q.fragments;
struct sk_buff *clone, *head = qp->q.fragments;
const struct iphdr *iph;
int err;
@@ -218,32 +219,40 @@ static void ip_expire(unsigned long arg)
if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
goto out;
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out_rcu_unlock;
goto out;
/* skb has no dst, perform route lookup again */
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (err)
goto out_rcu_unlock;
goto out;
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (frag_expire_skip_icmp(qp->user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL))
goto out_rcu_unlock;
goto out;
clone = skb_clone(head, GFP_ATOMIC);
/* Send an ICMP "Fragment Reassembly Timeout" message. */
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
rcu_read_unlock();
if (clone) {
spin_unlock(&qp->q.lock);
icmp_send(clone, ICMP_TIME_EXCEEDED,
ICMP_EXC_FRAGTIME, 0);
consume_skb(clone);
goto out_rcu_unlock;
}
}
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
ipq_put(qp);
}
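
The hang fixed above comes from calling icmp_send() with the fragment queue spinlock held. The fix snapshots the packet under the lock and transmits a private clone after dropping it, in outline (error paths elided):

spin_lock(&qp->q.lock);
clone = skb_clone(head, GFP_ATOMIC);	/* capture state while locked */
if (clone) {
	spin_unlock(&qp->q.lock);	/* drop before the ICMP transmit path */
	icmp_send(clone, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	consume_skb(clone);
	return;
}
spin_unlock(&qp->q.lock);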

@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
.timeout = 180,
};
static struct nf_conntrack_helper snmp_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
.expect_policy = &snmp_exp_policy,
.name = "snmp",
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
};
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
@@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
static int __init nf_nat_snmp_basic_init(void)
{
int ret = 0;
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
nf_conntrack_helper_unregister(&snmp_helper);
return ret;
}
return ret;
return nf_conntrack_helper_register(&snmp_trap_helper);
}
static void __exit nf_nat_snmp_basic_fini(void)

@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
{
return min(tp->snd_ssthresh, tp->snd_cwnd-1);
return min(tp->snd_ssthresh, tp->snd_cwnd);
}
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)

@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
struct nfnl_cthelper {
struct list_head list;
struct nf_conntrack_helper helper;
};
static LIST_HEAD(nfnl_cthelper_list);
static int
nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -205,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_helper *helper;
struct nfnl_cthelper *nfcth;
int ret;
if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
return -EINVAL;
helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
if (helper == NULL)
nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
if (nfcth == NULL)
return -ENOMEM;
helper = &nfcth->helper;
ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
if (ret < 0)
goto err;
goto err1;
strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -247,12 +256,98 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
ret = nf_conntrack_helper_register(helper);
if (ret < 0)
goto err;
goto err2;
list_add_tail(&nfcth->list, &nfnl_cthelper_list);
return 0;
err2:
kfree(helper->expect_policy);
err1:
kfree(nfcth);
return ret;
}
static int
nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
struct nf_conntrack_expect_policy *new_policy,
const struct nlattr *attr)
{
struct nlattr *tb[NFCTH_POLICY_MAX + 1];
int err;
err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
nfnl_cthelper_expect_pol);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_NAME] ||
!tb[NFCTH_POLICY_EXPECT_MAX] ||
!tb[NFCTH_POLICY_EXPECT_TIMEOUT])
return -EINVAL;
if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
return -EBUSY;
new_policy->max_expected =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
new_policy->timeout =
ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
return 0;
err:
kfree(helper);
return ret;
}
static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
struct nf_conntrack_helper *helper)
{
struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
struct nf_conntrack_expect_policy *policy;
int i, err;
/* Check first that all policy attributes are well-formed, so we don't
* leave things in inconsistent state on errors.
*/
for (i = 0; i < helper->expect_class_max + 1; i++) {
if (!tb[NFCTH_POLICY_SET + i])
return -EINVAL;
err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
&new_policy[i],
tb[NFCTH_POLICY_SET + i]);
if (err < 0)
return err;
}
/* Now we can safely update them. */
for (i = 0; i < helper->expect_class_max + 1; i++) {
policy = (struct nf_conntrack_expect_policy *)
&helper->expect_policy[i];
policy->max_expected = new_policy->max_expected;
policy->timeout = new_policy->timeout;
}
return 0;
}
static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
const struct nlattr *attr)
{
struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
unsigned int class_max;
int err;
err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set);
if (err < 0)
return err;
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
if (helper->expect_class_max + 1 != class_max)
return -EBUSY;
return nfnl_cthelper_update_policy_all(tb, helper);
}
static int
@@ -265,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
 		return -EBUSY;
 
 	if (tb[NFCTH_POLICY]) {
-		ret = nfnl_cthelper_parse_expect_policy(helper,
-							tb[NFCTH_POLICY]);
+		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
 		if (ret < 0)
 			return ret;
 	}
@@ -295,7 +389,8 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	int ret = 0, i;
+	struct nfnl_cthelper *nlcth;
+	int ret = 0;
 
 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
 		return -EINVAL;
@@ -306,31 +401,22 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 	if (ret < 0)
 		return ret;
 
-	rcu_read_lock();
-	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
-
-			if (strncmp(cur->name, helper_name,
-					NF_CT_HELPER_NAME_LEN) != 0)
-				continue;
-
-			if ((tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			if (nlh->nlmsg_flags & NLM_F_EXCL) {
-				ret = -EEXIST;
-				goto err;
-			}
-			helper = cur;
-			break;
-		}
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+
+		if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
+
+		if ((tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
+
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		helper = cur;
+		break;
 	}
-	rcu_read_unlock();
 
 	if (helper == NULL)
 		ret = nfnl_cthelper_create(tb, &tuple);
@@ -338,9 +424,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
 		ret = nfnl_cthelper_update(tb, helper);
 
 	return ret;
-err:
-	rcu_read_unlock();
-	return ret;
 }
 
 static int
@@ -504,11 +587,12 @@ static int
 nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 		  const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
-	int ret = -ENOENT, i;
+	int ret = -ENOENT;
 	struct nf_conntrack_helper *cur;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
+	struct nfnl_cthelper *nlcth;
 	bool tuple_set = false;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -529,45 +613,39 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
-
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-			if (skb2 == NULL) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-						      nlh->nlmsg_seq,
-						      NFNL_MSG_TYPE(nlh->nlmsg_type),
-						      NFNL_MSG_CTHELPER_NEW, cur);
-			if (ret <= 0) {
-				kfree_skb(skb2);
-				break;
-			}
-
-			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-					      MSG_DONTWAIT);
-			if (ret > 0)
-				ret = 0;
-
-			/* this avoids a loop in nfnetlink. */
-			return ret == -EAGAIN ? -ENOBUFS : ret;
-		}
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
+
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
+
+		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (skb2 == NULL) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+					      nlh->nlmsg_seq,
+					      NFNL_MSG_TYPE(nlh->nlmsg_type),
+					      NFNL_MSG_CTHELPER_NEW, cur);
+		if (ret <= 0) {
+			kfree_skb(skb2);
+			break;
+		}
+
+		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+				      MSG_DONTWAIT);
+		if (ret > 0)
+			ret = 0;
+
+		/* this avoids a loop in nfnetlink. */
+		return ret == -EAGAIN ? -ENOBUFS : ret;
 	}
+
 	return ret;
 }
@@ -578,10 +656,10 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
-	int i, j = 0, ret;
+	struct nfnl_cthelper *nlcth, *n;
+	int j = 0, ret;
 
 	if (tb[NFCTH_NAME])
 		helper_name = nla_data(tb[NFCTH_NAME]);
@@ -594,28 +672,27 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-					  hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
-
-			j++;
-
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			found = true;
-			nf_conntrack_helper_unregister(cur);
-		}
-	}
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		j++;
+
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
+
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
+
+		found = true;
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+
+		list_del(&nlcth->list);
+		kfree(nlcth);
+	}
+
 	/* Make sure we return success if we flush and there is no helpers */
 	return (found || j == 0) ? 0 : -ENOENT;
 }
@@ -664,20 +741,16 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
-	int i;
+	struct nfnl_cthelper *nlcth, *n;
 
 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-					  hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			nf_conntrack_helper_unregister(cur);
-		}
-	}
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+		kfree(nlcth);
 	}
 }
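
The new nfnl_cthelper_update_policy_all() above validates every NFCTH_POLICY_SET attribute into a scratch array before touching the helper's live policies, so a malformed attribute can no longer leave a half-updated policy set behind. (As merged, the commit loop reads from new_policy, element zero, for every class; the sketch below indexes the scratch array per entry.) A minimal userspace sketch of the validate-then-commit idea, with hypothetical names (parse_one, struct policy):

	#include <string.h>

	#define NPOLICY 4

	struct policy { unsigned int max_expected, timeout; };

	/* Hypothetical parser: fills *out, returns 0 or a negative error. */
	static int parse_one(const char *attr, struct policy *out)
	{
		if (!attr)
			return -1;
		out->max_expected = 1;
		out->timeout = 180;
		return 0;
	}

	static int update_policy_all(const char *attrs[NPOLICY],
				     struct policy live[NPOLICY])
	{
		struct policy scratch[NPOLICY];
		int i, err;

		/* Phase 1: validate everything into scratch storage. */
		for (i = 0; i < NPOLICY; i++) {
			err = parse_one(attrs[i], &scratch[i]);
			if (err < 0)
				return err;	/* live[] stays untouched */
		}

		/* Phase 2: nothing can fail now; commit in one go. */
		memcpy(live, scratch, sizeof(scratch));
		return 0;
	}

	int main(void)
	{
		const char *attrs[NPOLICY] = { "a", "b", "c", NULL };
		struct policy live[NPOLICY] = { { 0, 0 } };

		/* fails on the NULL attribute without modifying live[] */
		return update_policy_all(attrs, live) ? 1 : 0;
	}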

View file

@@ -390,7 +390,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 			   GFP_ATOMIC);
 	if (!skb) {
 		skb_tx_error(entskb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 
 	nlh = nlmsg_put(skb, 0, 0,
@@ -399,7 +399,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	if (!nlh) {
 		skb_tx_error(entskb);
 		kfree_skb(skb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 
 	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = entry->state.pf;
@@ -542,12 +542,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	}
 
 	nlh->nlmsg_len = skb->len;
+
 	if (seclen)
 		security_release_secctx(secdata, seclen);
 	return skb;
 
 nla_put_failure:
 	skb_tx_error(entskb);
 	kfree_skb(skb);
 	net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return NULL;
 }
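
The fix above routes the two early returns through a new nlmsg_failure label, so the security context obtained earlier in the function is released on every exit path instead of leaking. A generic userspace sketch of this goto-unwind idiom, all names hypothetical:

	#include <stdlib.h>

	struct ctx { char *secdata; char *msg; };

	static int build_message(struct ctx *c)
	{
		c->secdata = malloc(32);	/* like security_secid_to_secctx() */
		if (!c->secdata)
			return -1;

		c->msg = malloc(256);		/* like nlmsg_new() */
		if (!c->msg)
			goto msg_failure;	/* don't just return: unwind */

		return 0;

	msg_failure:
		free(c->secdata);		/* one place releases secdata */
		c->secdata = NULL;
		return -1;
	}

	int main(void)
	{
		struct ctx c = { NULL, NULL };
		int ret = build_message(&c);

		free(c.msg);
		free(c.secdata);
		return ret ? 1 : 0;
	}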

View file

@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+	"nlk_cb_mutex-ROUTE",
+	"nlk_cb_mutex-1",
+	"nlk_cb_mutex-USERSOCK",
+	"nlk_cb_mutex-FIREWALL",
+	"nlk_cb_mutex-SOCK_DIAG",
+	"nlk_cb_mutex-NFLOG",
+	"nlk_cb_mutex-XFRM",
+	"nlk_cb_mutex-SELINUX",
+	"nlk_cb_mutex-ISCSI",
+	"nlk_cb_mutex-AUDIT",
+	"nlk_cb_mutex-FIB_LOOKUP",
+	"nlk_cb_mutex-CONNECTOR",
+	"nlk_cb_mutex-NETFILTER",
+	"nlk_cb_mutex-IP6_FW",
+	"nlk_cb_mutex-DNRTMSG",
+	"nlk_cb_mutex-KOBJECT_UEVENT",
+	"nlk_cb_mutex-GENERIC",
+	"nlk_cb_mutex-17",
+	"nlk_cb_mutex-SCSITRANSPORT",
+	"nlk_cb_mutex-ECRYPTFS",
+	"nlk_cb_mutex-RDMA",
+	"nlk_cb_mutex-CRYPTO",
+	"nlk_cb_mutex-SMC",
+	"nlk_cb_mutex-23",
+	"nlk_cb_mutex-24",
+	"nlk_cb_mutex-25",
+	"nlk_cb_mutex-26",
+	"nlk_cb_mutex-27",
+	"nlk_cb_mutex-28",
+	"nlk_cb_mutex-29",
+	"nlk_cb_mutex-30",
+	"nlk_cb_mutex-31",
+	"nlk_cb_mutex-MAX_LINKS"
+};
+
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
 	} else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
+		lockdep_set_class_and_name(nlk->cb_mutex,
+					   nlk_cb_mutex_keys + protocol,
+					   nlk_cb_mutex_key_strings[protocol]);
 	}
 	init_waitqueue_head(&nlk->wait);
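
The hunk above gives each netlink protocol's cb_mutex its own lockdep class and name, so lockdep can tell, say, a NETLINK_GENERIC dump apart from a NETLINK_ROUTE dump instead of treating every cb_mutex as one class locked against itself; that shared class is what produced the false crypto_alg_sem/rtnl_mutex/genl_mutex deadlock report this release addresses. A kernel-style sketch of the same technique for any two locks of one type (not a standalone program; my_a, my_b and my_keys are made-up names):

	#include <linux/mutex.h>
	#include <linux/lockdep.h>

	static struct lock_class_key my_keys[2];
	static struct mutex my_a, my_b;

	static void my_locks_init(void)
	{
		mutex_init(&my_a);
		mutex_init(&my_b);
		/* distinct classes: nesting my_a inside my_b no longer
		 * looks like recursive locking of a single class */
		lockdep_set_class_and_name(&my_a, &my_keys[0], "my_a");
		lockdep_set_class_and_name(&my_b, &my_keys[1], "my_b");
	}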

View file

@@ -199,9 +199,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
 	if (p->set_tc_index) {
+		int wlen = skb_network_offset(skb);
+
 		switch (tc_skb_protocol(skb)) {
 		case htons(ETH_P_IP):
-			if (skb_cow_head(skb, sizeof(struct iphdr)))
+			wlen += sizeof(struct iphdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -209,7 +213,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			break;
 
 		case htons(ETH_P_IPV6):
-			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+			wlen += sizeof(struct ipv6hdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
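
The dsmark change above replaces skb_cow_head(), whose length argument requests extra headroom rather than a span of data to make writable, with an explicit check that the bytes up to the end of the IP header are present in the linear area (pskb_may_pull) and private to this skb (skb_try_make_writable) before the DS field is rewritten. A kernel-style sketch of that precondition as a helper (not standalone; header_writable is a made-up name):

	/* Ensure the first len bytes of skb are pullable and writable. */
	static int header_writable(struct sk_buff *skb, unsigned int len)
	{
		if (!pskb_may_pull(skb, len))		/* bytes in linear area? */
			return -EINVAL;
		if (skb_try_make_writable(skb, len))	/* unshared, writable copy? */
			return -ENOMEM;
		return 0;
	}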

View file

@@ -240,7 +240,8 @@ out_master_del:
 out_err:
 	kfree(acomp);
 	bus->audio_component = NULL;
-	dev_err(dev, "failed to add i915 component master (%d)\n", ret);
+	hdac_acomp = NULL;
+	dev_info(dev, "failed to add i915 component master (%d)\n", ret);
 
 	return ret;
 }
@@ -273,6 +274,7 @@ int snd_hdac_i915_exit(struct hdac_bus *bus)
 
 	kfree(acomp);
 	bus->audio_component = NULL;
+	hdac_acomp = NULL;
 
 	return 0;
 }

View file

@@ -2088,9 +2088,11 @@ static int azx_probe_continue(struct azx *chip)
 		 * for other chips, still continue probing as other
 		 * codecs can be on the same link.
 		 */
-		if (CONTROLLER_IN_GPU(pci))
+		if (CONTROLLER_IN_GPU(pci)) {
+			dev_err(chip->card->dev,
+				"HSW/BDW HD-audio HDMI/DP requires binding with gfx driver\n");
 			goto out_free;
-		else
+		} else
 			goto skip_i915;
 	}

View file

@@ -261,6 +261,7 @@ enum {
 	CXT_FIXUP_HP_530,
 	CXT_FIXUP_CAP_MIX_AMP_5047,
 	CXT_FIXUP_MUTE_LED_EAPD,
+	CXT_FIXUP_HP_DOCK,
 	CXT_FIXUP_HP_SPECTRE,
 	CXT_FIXUP_HP_GATE_MIC,
 };
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt_fixup_mute_led_eapd,
 	},
+	[CXT_FIXUP_HP_DOCK] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x16, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ }
+		}
+	},
 	[CXT_FIXUP_HP_SPECTRE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -872,6 +882,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
 	{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
 	{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
 	{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+	{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
 	{}
 };

View file

@@ -4839,6 +4839,7 @@ enum {
 	ALC286_FIXUP_HP_GPIO_LED,
 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
 	ALC280_FIXUP_HP_DOCK_PINS,
+	ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
 	ALC280_FIXUP_HP_9480M,
 	ALC288_FIXUP_DELL_HEADSET_MODE,
 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5377,6 +5378,16 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC280_FIXUP_HP_GPIO4
 	},
+	[ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1b, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ },
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+	},
 	[ALC280_FIXUP_HP_9480M] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc280_fixup_hp_9480m,
@@ -5629,7 +5640,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5794,6 +5805,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},

View file

@@ -986,7 +986,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	 * changes) is disallowed above, so any other attribute changes getting
 	 * here can be skipped.
 	 */
-	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+	if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		return r;
 	}