This is the 4.4.109 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlpL3okACgkQONu9yGCS
 aT6p5g/8CAG9NU/fLu7IMcIlyqfVvdOhzxn44oHCxq08eycqoggdnb3TZXxBUBgY
 +w8uZk8yxNdjXR39GjkMSUy06WRvl2XDSrd36sDGRCBP62Fi8l5scmlRaNEnI/E8
 ltBSB93P16SmnpKa/3Zscz+7LcaoXHpU5Xhs8Zmf4I69qmzOFX2qSKsUyzVT+gNI
 ZoSN/mYuXf7+dzrcKhVdYzm4ZdMRvxdT0WefeoeZMekfAtU9D8zaFOA9jTIAMHSZ
 adNn18s7UKmaipZf/01mW9srvZce4nPKiUC8WVGstiyl27ws+IDleKVmDnqFALjy
 2LIxDvjDth/x8jfqTb7F6bFh6dVtMJjwUmd3KL7hgPuTddoQQe/GfKnjSHkbNxyR
 qNxNtbOgQ2EVOf59fejxWshCP/fButNo8uvCI1ERdm4axGXcf9hiucdlwzCYezHs
 UN0xrxAXprhqTq4hQFB9E4C49e8nMPNsyXTMZwSZRPe2z53spD53JR/0sl5Z2RWe
 ueO21tBZ6ev9jPNi+lJrCVw1oBO+PKOmdNPAaSynUVm96grRnW6grUI3mX9FqMXb
 r62UWG3YCWWBgxA3iQQrMxf/3S2YZXz59TBbp9GU8xOYJZLhKL29/iB7Rv4ANtkR
 aMDrABjWqrCZpIazqkZ5uwbsNl6Q51e3Mji3EfwkBaMqjc41++I=
 =B52+
 -----END PGP SIGNATURE-----

Merge 4.4.109 into android-4.4

Changes in 4.4.109
	ACPI: APEI / ERST: Fix missing error handling in erst_reader()
	crypto: mcryptd - protect the per-CPU queue with a lock
	mfd: cros ec: spi: Don't send first message too soon
	mfd: twl4030-audio: Fix sibling-node lookup
	mfd: twl6040: Fix child-node lookup
	ALSA: rawmidi: Avoid racy info ioctl via ctl device
	ALSA: usb-audio: Fix the missing ctl name suffix at parsing SU
	PCI / PM: Force devices to D0 in pci_pm_thaw_noirq()
	parisc: Hide Diva-built-in serial aux and graphics card
	spi: xilinx: Detect stall with Unknown commands
	KVM: X86: Fix load RFLAGS w/o the fixed bit
	kvm: x86: fix RSM when PCID is non-zero
	powerpc/perf: Dereference BHRB entries safely
	net: mvneta: clear interface link status on port disable
	tracing: Remove extra zeroing out of the ring buffer page
	tracing: Fix possible double free on failure of allocating trace buffer
	tracing: Fix crash when it fails to alloc ring buffer
	ring-buffer: Mask out the info bits when returning buffer page length
	iw_cxgb4: Only validate the MSN for successful completions
	ASoC: fsl_ssi: AC'97 ops need regmap, clock and cleaning up on failure
	ASoC: twl4030: fix child-node lookup
	ALSA: hda: Drop useless WARN_ON()
	ALSA: hda - fix headset mic detection issue on a Dell machine
	x86/vm86/32: Switch to flush_tlb_mm_range() in mark_screen_rdonly()
	x86/mm: Remove flush_tlb() and flush_tlb_current_task()
	x86/mm: Make flush_tlb_mm_range() more predictable
	x86/mm: Reimplement flush_tlb_page() using flush_tlb_mm_range()
	x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code
	x86/mm: Disable PCID on 32-bit kernels
	x86/mm: Add the 'nopcid' boot option to turn off PCID
	x86/mm: Enable CR4.PCIDE on supported systems
	x86/mm/64: Fix reboot interaction with CR4.PCIDE
	kbuild: add '-fno-stack-check' to kernel build options
	ipv4: igmp: guard against silly MTU values
	ipv6: mcast: better catch silly mtu values
	net: igmp: Use correct source address on IGMPv3 reports
	netlink: Add netns check on taps
	net: qmi_wwan: add Sierra EM7565 1199:9091
	net: reevalulate autoflowlabel setting after sysctl setting
	tcp md5sig: Use skb's saddr when replying to an incoming segment
	tg3: Fix rx hang on MTU change with 5717/5719
	net: ipv4: fix for a race condition in raw_sendmsg
	net: mvmdio: disable/unprepare clocks in EPROBE_DEFER case
	sctp: Replace use of sockets_allocated with specified macro.
	ipv4: Fix use-after-free when flushing FIB tables
	net: bridge: fix early call to br_stp_change_bridge_id and plug newlink leaks
	net: Fix double free and memory corruption in get_net_ns_by_id()
	net: phy: micrel: ksz9031: reconfigure autoneg after phy autoneg workaround
	sock: free skb in skb_complete_tx_timestamp on error
	usbip: fix usbip bind writing random string after command in match_busid
	usbip: stub: stop printing kernel pointer addresses in messages
	usbip: vhci: stop printing kernel pointer addresses in messages
	USB: serial: ftdi_sio: add id for Airbus DS P8GR
	USB: serial: qcserial: add Sierra Wireless EM7565
	USB: serial: option: add support for Telit ME910 PID 0x1101
	USB: serial: option: adding support for YUGA CLM920-NC5
	usb: Add device quirk for Logitech HD Pro Webcam C925e
	usb: add RESET_RESUME for ELSA MicroLink 56K
	USB: Fix off by one in type-specific length check of BOS SSP capability
	usb: xhci: Add XHCI_TRUST_TX_LENGTH for Renesas uPD720201
	nohz: Prevent a timer interrupt storm in tick_nohz_stop_sched_tick()
	x86/smpboot: Remove stale TLB flush invocations
	n_tty: fix EXTPROC vs ICANON interaction with TIOCINQ (aka FIONREAD)
	mm/vmstat: Make NR_TLB_REMOTE_FLUSH_RECEIVED available even on UP
	Linux 4.4.109

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 8cbe01c651 by Greg Kroah-Hartman, 2018-01-02 20:58:26 +01:00
77 changed files with 458 additions and 330 deletions

View file

@@ -2561,6 +2561,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	nopat		[X86] Disable PAT (page attribute table extension of
 			pagetables) support.
 
+	nopcid		[X86-64] Disable the PCID cpu feature.
+
 	norandmaps	Don't use address space randomization. Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 108
+SUBLEVEL = 109
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -804,6 +804,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 
+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
+
 # conserve stack if available
 KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)

View file

@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 	int ret;
 	__u64 target;
 
-	if (is_kernel_addr(addr))
-		return branch_target((unsigned int *)addr);
+	if (is_kernel_addr(addr)) {
+		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+			return 0;
+
+		return branch_target(&instr);
+	}
 
 	/* Userspace: need copy instruction here then translate it */
 	pagefault_disable();

View file

@@ -43,7 +43,7 @@ config X86
 	select ARCH_USE_CMPXCHG_LOCKREF		if X86_64
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION if X86_32

View file

@@ -21,11 +21,13 @@
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
 # define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
 # define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID		0
 #else
 # define DISABLE_VME		0
 # define DISABLE_K6_MTRR	0
 # define DISABLE_CYRIX_ARR	0
 # define DISABLE_CENTAUR_MCR	0
+# define DISABLE_PCID		(1<<(X86_FEATURE_PCID & 31))
 #endif /* CONFIG_X86_64 */
 
 /*
@@ -35,7 +37,7 @@
 #define DISABLED_MASK1	0
 #define DISABLED_MASK2	0
 #define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4	0
+#define DISABLED_MASK4	(DISABLE_PCID)
 #define DISABLED_MASK5	0
 #define DISABLED_MASK6	0
 #define DISABLED_MASK7	0

View file

@@ -22,8 +22,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif

View file

@@ -24,12 +24,6 @@ typedef struct {
 	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */

View file

@@ -98,10 +98,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,

View file

@ -6,6 +6,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/special_insns.h> #include <asm/special_insns.h>
#include <asm/smp.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr, static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type) unsigned long type)
@ -64,10 +65,8 @@ static inline void invpcid_flush_all_nonglobals(void)
#endif #endif
struct tlb_state { struct tlb_state {
#ifdef CONFIG_SMP
struct mm_struct *active_mm; struct mm_struct *active_mm;
int state; int state;
#endif
/* /*
* Access to this CR4 shadow and to H/W CR4 is protected by * Access to this CR4 shadow and to H/W CR4 is protected by
@ -191,6 +190,14 @@ static inline void __flush_tlb_all(void)
__flush_tlb_global(); __flush_tlb_global();
else else
__flush_tlb(); __flush_tlb();
/*
* Note: if we somehow had PCID but not PGE, then this wouldn't work --
* we'd end up flushing kernel translations for the current ASID but
* we might fail to flush kernel translations for other cached ASIDs.
*
* To avoid this issue, we force PCID off if PGE is off.
*/
} }
static inline void __flush_tlb_one(unsigned long addr) static inline void __flush_tlb_one(unsigned long addr)
@ -204,7 +211,6 @@ static inline void __flush_tlb_one(unsigned long addr)
/* /*
* TLB flushing: * TLB flushing:
* *
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs * - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_page(vma, vmaddr) flushes one page
@ -216,84 +222,6 @@ static inline void __flush_tlb_one(unsigned long addr)
* and page-granular flushes are available only on i486 and up. * and page-granular flushes are available only on i486 and up.
*/ */
#ifndef CONFIG_SMP
/* "_up" is for UniProcessor.
*
* This is a helper for other header functions. *Not* intended to be called
* directly. All global TLB flushes need to either call this, or to bump the
* vm statistics themselves.
*/
static inline void __flush_tlb_up(void)
{
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
}
static inline void flush_tlb_all(void)
{
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb_all();
}
static inline void flush_tlb(void)
{
__flush_tlb_up();
}
static inline void local_flush_tlb(void)
{
__flush_tlb_up();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
__flush_tlb_up();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (vma->vm_mm == current->active_mm)
__flush_tlb_up();
}
static inline void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end, unsigned long vmflag)
{
if (mm == current->active_mm)
__flush_tlb_up();
}
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
}
static inline void reset_lazy_tlbstate(void)
{
}
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
}
#else /* SMP */
#include <asm/smp.h>
#define local_flush_tlb() __flush_tlb() #define local_flush_tlb() __flush_tlb()
#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL) #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@ -302,13 +230,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
extern void flush_tlb_all(void); extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag); unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task() static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}
void native_flush_tlb_others(const struct cpumask *cpumask, void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm, struct mm_struct *mm,
@ -323,8 +252,6 @@ static inline void reset_lazy_tlbstate(void)
this_cpu_write(cpu_tlbstate.active_mm, &init_mm); this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
} }
#endif /* SMP */
#ifndef CONFIG_PARAVIRT #ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end) \ #define flush_tlb_others(mask, mm, start, end) \
native_flush_tlb_others(mask, mm, start, end) native_flush_tlb_others(mask, mm, start, end)

View file

@@ -19,6 +19,14 @@
 
 void __init check_bugs(void)
 {
+#ifdef CONFIG_X86_32
+	/*
+	 * Regardless of whether PCID is enumerated, the SDM says
+	 * that it can't be enabled in 32-bit mode.
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
+
 	identify_boot_cpu();
 #ifndef CONFIG_SMP
 	pr_info("CPU: ");

View file

@ -162,6 +162,24 @@ static int __init x86_mpx_setup(char *s)
} }
__setup("nompx", x86_mpx_setup); __setup("nompx", x86_mpx_setup);
#ifdef CONFIG_X86_64
static int __init x86_pcid_setup(char *s)
{
/* require an exact match without trailing characters */
if (strlen(s))
return 0;
/* do not emit a message if the feature is not present */
if (!boot_cpu_has(X86_FEATURE_PCID))
return 1;
setup_clear_cpu_cap(X86_FEATURE_PCID);
pr_info("nopcid: PCID feature disabled\n");
return 1;
}
__setup("nopcid", x86_pcid_setup);
#endif
static int __init x86_noinvpcid_setup(char *s) static int __init x86_noinvpcid_setup(char *s)
{ {
/* noinvpcid doesn't accept parameters */ /* noinvpcid doesn't accept parameters */
@ -303,6 +321,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
} }
} }
static void setup_pcid(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PCID)) {
if (cpu_has(c, X86_FEATURE_PGE)) {
cr4_set_bits(X86_CR4_PCIDE);
} else {
/*
* flush_tlb_all(), as currently implemented, won't
* work if PCID is on but PGE is not. Since that
* combination doesn't exist on real hardware, there's
* no reason to try to fully support it, but it's
* polite to avoid corrupting data if we're on
* an improperly configured VM.
*/
clear_cpu_cap(c, X86_FEATURE_PCID);
}
}
}
/* /*
* Some CPU features depend on higher CPUID levels, which may not always * Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization * be available due to CPUID level capping or broken virtualization
@ -934,6 +971,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c); setup_smep(c);
setup_smap(c); setup_smap(c);
/* Set up PCID */
setup_pcid(c);
/* /*
* The vendor-specific functions might have changed features. * The vendor-specific functions might have changed features.
* Now we do "generic changes." * Now we do "generic changes."

View file

@@ -93,6 +93,10 @@ void __noreturn machine_real_restart(unsigned int type)
 	load_cr3(initial_page_table);
 #else
 	write_cr3(real_mode_header->trampoline_pgd);
+
+	/* Exiting long mode will fail if CR4.PCIDE is set. */
+	if (static_cpu_has(X86_FEATURE_PCID))
+		cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
 	/* Jump to the identity-mapped low memory code */

View file

@@ -104,25 +104,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 	spin_lock_irqsave(&rtc_lock, flags);
 	CMOS_WRITE(0xa, 0xf);
 	spin_unlock_irqrestore(&rtc_lock, flags);
-	local_flush_tlb();
-	pr_debug("1.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 							start_eip >> 4;
-	pr_debug("2.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 							start_eip & 0xf;
-	pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
 {
 	unsigned long flags;
 
-	/*
-	 * Install writable page 0 entry to set BIOS data area.
-	 */
-	local_flush_tlb();
-
 	/*
 	 * Paranoid: Set warm reset code and vector here back
 	 * to default values.

View file

@@ -187,7 +187,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_unmap_unlock(pte, ptl);
 out:
 	up_write(&mm->mmap_sem);
-	flush_tlb();
+	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
 }

View file

@ -2383,9 +2383,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
} }
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4) u64 cr0, u64 cr3, u64 cr4)
{ {
int bad; int bad;
u64 pcid;
/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
pcid = 0;
if (cr4 & X86_CR4_PCIDE) {
pcid = cr3 & 0xfff;
cr3 &= ~0xfff;
}
bad = ctxt->ops->set_cr(ctxt, 3, cr3);
if (bad)
return X86EMUL_UNHANDLEABLE;
/* /*
* First enable PAE, long mode needs it before CR0.PG = 1 is set. * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@ -2404,6 +2416,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
bad = ctxt->ops->set_cr(ctxt, 4, cr4); bad = ctxt->ops->set_cr(ctxt, 4, cr4);
if (bad) if (bad)
return X86EMUL_UNHANDLEABLE; return X86EMUL_UNHANDLEABLE;
if (pcid) {
bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
if (bad)
return X86EMUL_UNHANDLEABLE;
}
} }
return X86EMUL_CONTINUE; return X86EMUL_CONTINUE;
@ -2414,11 +2432,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
struct desc_struct desc; struct desc_struct desc;
struct desc_ptr dt; struct desc_ptr dt;
u16 selector; u16 selector;
u32 val, cr0, cr4; u32 val, cr0, cr3, cr4;
int i; int i;
cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
@ -2460,14 +2478,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
return rsm_enter_protected_mode(ctxt, cr0, cr4); return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
} }
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{ {
struct desc_struct desc; struct desc_struct desc;
struct desc_ptr dt; struct desc_ptr dt;
u64 val, cr0, cr4; u64 val, cr0, cr3, cr4;
u32 base3; u32 base3;
u16 selector; u16 selector;
int i, r; int i, r;
@ -2484,7 +2502,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
cr0 = GET_SMSTATE(u64, smbase, 0x7f58); cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
cr4 = GET_SMSTATE(u64, smbase, 0x7f48); cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
val = GET_SMSTATE(u64, smbase, 0x7ed0); val = GET_SMSTATE(u64, smbase, 0x7ed0);
@ -2512,7 +2530,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
dt.address = GET_SMSTATE(u64, smbase, 0x7e68); dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt); ctxt->ops->set_gdt(ctxt, &dt);
r = rsm_enter_protected_mode(ctxt, cr0, cr4); r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
if (r != X86EMUL_CONTINUE) if (r != X86EMUL_CONTINUE)
return r; return r;

View file

@@ -6941,7 +6941,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
 	kvm_rip_write(vcpu, regs->rip);
-	kvm_set_rflags(vcpu, regs->rflags);
+	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
 	vcpu->arch.exception.pending = false;

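For context (an illustrative, standalone sketch; not part of the patch above): RFLAGS bit 1 is architecturally reserved and must always read as 1, which is what X86_EFLAGS_FIXED (0x2) encodes, so the fix simply ORs that bit into whatever userspace supplied via KVM_SET_REGS before loading it. A minimal user-space illustration of the masking, with the constant redefined locally:

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_FIXED 0x2ULL	/* bit 1 of RFLAGS: reserved, always set */

int main(void)
{
	/* Pretend userspace handed the hypervisor a bogus RFLAGS value of 0. */
	uint64_t requested = 0x0;
	/* What the patched kvm_arch_vcpu_ioctl_set_regs() actually loads. */
	uint64_t loaded = requested | X86_EFLAGS_FIXED;

	printf("requested=%#llx loaded=%#llx\n",
	       (unsigned long long)requested, (unsigned long long)loaded);
	return 0;
}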
View file

@@ -753,10 +753,8 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
 	.active_mm = &init_mm,
 	.state = 0,
-#endif
 	.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
 };
 EXPORT_SYMBOL_GPL(cpu_tlbstate);

View file

@ -15,7 +15,7 @@
#include <linux/debugfs.h> #include <linux/debugfs.h>
/* /*
* Smarter SMP flushing macros. * TLB flushing, formerly SMP-only
* c/o Linus Torvalds. * c/o Linus Torvalds.
* *
* These mean you can really definitely utterly forget about * These mean you can really definitely utterly forget about
@ -28,8 +28,6 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/ */
#ifdef CONFIG_SMP
struct flush_tlb_info { struct flush_tlb_info {
struct mm_struct *flush_mm; struct mm_struct *flush_mm;
unsigned long flush_start; unsigned long flush_start;
@ -59,8 +57,6 @@ void leave_mm(int cpu)
} }
EXPORT_SYMBOL_GPL(leave_mm); EXPORT_SYMBOL_GPL(leave_mm);
#endif /* CONFIG_SMP */
void switch_mm(struct mm_struct *prev, struct mm_struct *next, void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk) struct task_struct *tsk)
{ {
@ -77,10 +73,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id(); unsigned cpu = smp_processor_id();
if (likely(prev != next)) { if (likely(prev != next)) {
#ifdef CONFIG_SMP
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next); this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
cpumask_set_cpu(cpu, mm_cpumask(next)); cpumask_set_cpu(cpu, mm_cpumask(next));
/* /*
@ -137,9 +131,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev->context.ldt != next->context.ldt)) if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next); load_mm_ldt(next);
#endif #endif
} } else {
#ifdef CONFIG_SMP
else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
@ -166,11 +158,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
load_mm_ldt(next); load_mm_ldt(next);
} }
} }
#endif
} }
#ifdef CONFIG_SMP
/* /*
* The flush IPI assumes that a thread switch happens in this order: * The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches] * [cpu0: the cpu that switches]
@ -272,23 +261,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
smp_call_function_many(cpumask, flush_tlb_func, &info, 1); smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
} }
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
preempt_disable();
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
/* This is an implicit full barrier that synchronizes with switch_mm. */
local_flush_tlb();
trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
/* /*
* See Documentation/x86/tlb.txt for details. We choose 33 * See Documentation/x86/tlb.txt for details. We choose 33
* because it is large enough to cover the vast majority (at * because it is large enough to cover the vast majority (at
@ -309,6 +281,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long base_pages_to_flush = TLB_FLUSH_ALL; unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
preempt_disable(); preempt_disable();
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
if (base_pages_to_flush > tlb_single_page_flush_ceiling)
base_pages_to_flush = TLB_FLUSH_ALL;
if (current->active_mm != mm) { if (current->active_mm != mm) {
/* Synchronize with switch_mm. */ /* Synchronize with switch_mm. */
smp_mb(); smp_mb();
@ -325,15 +303,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
goto out; goto out;
} }
if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
base_pages_to_flush = (end - start) >> PAGE_SHIFT;
/* /*
* Both branches below are implicit full barriers (MOV to CR or * Both branches below are implicit full barriers (MOV to CR or
* INVLPG) that synchronize with switch_mm. * INVLPG) that synchronize with switch_mm.
*/ */
if (base_pages_to_flush > tlb_single_page_flush_ceiling) { if (base_pages_to_flush == TLB_FLUSH_ALL) {
base_pages_to_flush = TLB_FLUSH_ALL;
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb(); local_flush_tlb();
} else { } else {
@ -354,33 +328,6 @@ out:
preempt_enable(); preempt_enable();
} }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
if (current->active_mm == mm) {
if (current->mm) {
/*
* Implicit full barrier (INVLPG) that synchronizes
* with switch_mm.
*/
__flush_tlb_one(start);
} else {
leave_mm(smp_processor_id());
/* Synchronize with switch_mm. */
smp_mb();
}
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
preempt_enable();
}
static void do_flush_tlb_all(void *info) static void do_flush_tlb_all(void *info)
{ {
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@ -465,5 +412,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0; return 0;
} }
late_initcall(create_tlb_single_page_flush_ceiling); late_initcall(create_tlb_single_page_flush_ceiling);
#endif /* CONFIG_SMP */

View file

@@ -433,6 +433,12 @@ static void __init xen_init_cpuid_mask(void)
 		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
 		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */
 
+	/*
+	 * Xen PV would need some work to support PCID: CR3 handling as well
+	 * as xen_flush_tlb_others() would need updating.
+	 */
+	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32));  /* disable PCID */
+
 	if (!xen_initial_domain())
 		cpuid_leaf1_edx_mask &=
 			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

View file

@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)

View file

@@ -1020,7 +1020,7 @@ skip:
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < sizeof(*rcd)) {
+	else if (len < 0 || len < sizeof(*rcd)) {
 		rc = -EIO;
 		goto out;
 	}

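For context (illustration only, not part of the patch): the added "len < 0" test matters because sizeof(*rcd) has type size_t, so in the bare "len < sizeof(*rcd)" comparison a negative len (an error code) is converted to a huge unsigned value and the short-record check is silently skipped. A standalone sketch of that pitfall, using a stand-in record type:

#include <stdio.h>

struct record { char data[64]; };	/* stand-in for *rcd */

int main(void)
{
	long len = -5;	/* pretend the lower-level reader returned an error */

	/* Old-style check: len is converted to size_t, -5 becomes huge, test is false. */
	if (len < sizeof(struct record))
		printf("old check: caught short record\n");
	else
		printf("old check: missed it (len compares as %zu)\n", (size_t)len);

	/* Patched logic: reject negative lengths before the unsigned compare. */
	if (len < 0 || (size_t)len < sizeof(struct record))
		printf("fixed check: caught\n");

	return 0;
}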
View file

@@ -579,10 +579,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 			ret = -EAGAIN;
 			goto skip_cqe;
 		}
-		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+		if (unlikely(!CQE_STATUS(hw_cqe) &&
+			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-			goto proc_cqe;
+			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
 		}
 		goto proc_cqe;
 	}

View file

@@ -660,6 +660,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
 			sizeof(struct ec_response_get_protocol_info);
 	ec_dev->dout_size = sizeof(struct ec_host_request);
 
+	ec_spi->last_transfer_ns = ktime_get_ns();
 
 	err = cros_ec_register(ec_dev);
 	if (err) {

View file

@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->codec)
 		return true;
 
-	if (of_find_node_by_name(node, "codec"))
+	node = of_get_child_by_name(parent, "codec");
+	if (node) {
+		of_node_put(node);
 		return true;
+	}
 
 	return false;
 }

View file

@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-	if (of_find_node_by_name(node, "vibra"))
+	struct device_node *node;
+
+	node = of_get_child_by_name(parent, "vibra");
+	if (node) {
+		of_node_put(node);
 		return true;
-#endif
+	}
+
 	return false;
 }

View file

@@ -14228,7 +14228,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
 	 * breaks all requests to 256 bytes.
 	 */
-	if (tg3_asic_rev(tp) == ASIC_REV_57766)
+	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719)
 		reset_phy = true;
 
 	err = tg3_restart_hw(tp, reset_phy);

View file

@@ -241,7 +241,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
 			dev->regs + MVMDIO_ERR_INT_MASK);
 
 	} else if (dev->err_interrupt == -EPROBE_DEFER) {
-		return -EPROBE_DEFER;
+		ret = -EPROBE_DEFER;
+		goto out_mdio;
 	}
 
 	mutex_init(&dev->lock);

View file

@@ -914,6 +914,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }

View file

@@ -541,6 +541,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
 		phydev->link = 0;
 		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
 			phydev->drv->config_intr(phydev);
+		return genphy_config_aneg(phydev);
 	}
 
 	return 0;

View file

@@ -737,6 +737,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */

View file

@ -1654,3 +1654,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
iounmap(base_addr); iounmap(base_addr);
} }
/*
* The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
* seems rushed, so that many built-in components simply don't work.
* The following quirks disable the serial AUX port and the built-in ATI RV100
* Radeon 7000 graphics card which both don't have any external connectors and
* thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
* such makes those machines the only PARISC machines on which we can't use
* ttyS0 as boot console.
*/
static void quirk_diva_ati_card(struct pci_dev *dev)
{
if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
dev->subsystem_device != 0x1292)
return;
dev_info(&dev->dev, "Hiding Diva built-in ATI card");
dev->device = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
quirk_diva_ati_card);
static void quirk_diva_aux_disable(struct pci_dev *dev)
{
if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
dev->subsystem_device != 0x1291)
return;
dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
dev->device = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
quirk_diva_aux_disable);

View file

@@ -944,7 +944,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_update_current_state(pci_dev, PCI_D0);
+	/*
+	 * pci_restore_state() requires the device to be in D0 (because of MSI
+	 * restoration among other things), so force it into D0 in case the
+	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 */
+	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)

View file

@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	while (remaining_words) {
 		int n_words, tx_words, rx_words;
 		u32 sr;
+		int stalled;
 
 		n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
 		/* Read out all the data from the Rx FIFO */
 		rx_words = n_words;
+		stalled = 10;
 		while (rx_words) {
+			if (rx_words == n_words && !(stalled--) &&
+			    !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+			    (sr & XSPI_SR_RX_EMPTY_MASK)) {
+				dev_err(&spi->dev,
+					"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+				xspi_init_hw(xspi);
+				return -EIO;
+			}
+
 			if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
 				xilinx_spi_rx(xspi);
 				rx_words--;

View file

@@ -1801,7 +1801,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
 {
 	struct n_tty_data *ldata = tty->disc_data;
 
-	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
 		bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
 		ldata->line_start = ldata->read_tail;
 		if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2493,7 +2493,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
 		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
 	case TIOCINQ:
 		down_write(&tty->termios_rwsem);
-		if (L_ICANON(tty))
+		if (L_ICANON(tty) && !L_EXTPROC(tty))
 			retval = inq_canon(ldata);
 		else
 			retval = read_cnt(ldata);

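For context (illustration only, not from the patch): TIOCINQ/FIONREAD is the ioctl affected here; with the fix, a tty that has EXTPROC set gets the raw readable byte count (read_cnt) instead of the canonical-mode line accounting even when ICANON is also set. A minimal user-space query of that counter looks like this:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int pending = 0;

	/* FIONREAD (aka TIOCINQ on ttys) reports bytes readable without blocking. */
	if (ioctl(STDIN_FILENO, FIONREAD, &pending) == -1) {
		perror("ioctl(FIONREAD)");
		return 1;
	}
	printf("bytes readable on stdin: %d\n", pending);
	return 0;
}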
View file

@@ -973,7 +973,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		case USB_SSP_CAP_TYPE:
 			ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
 			ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
-				USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+				USB_SSP_SUBLINK_SPEED_ATTRIBS);
 			if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
 				dev->bos->ssp_cap = ssp_cap;
 			break;

View file

@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Microsoft LifeCam-VX700 v2.0 */
 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
-	/* Logitech HD Pro Webcams C920, C920-C and C930e */
+	/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
 
 	/* Logitech ConferenceCam CC3000e */
 	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
 	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
 
+	/* ELSA MicroLink 56K */
+	{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },

View file

@@ -184,6 +184,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+			pdev->device == 0x0014)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
 			pdev->device == 0x0015)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;

View file

@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = {
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
 	{ }					/* Terminating entry */
 };

View file

@@ -913,6 +913,12 @@
 #define ICPDAS_I7561U_PID		0x0104
 #define ICPDAS_I7563U_PID		0x0105
 
+/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID			0x1e8e  /* Vendor ID */
+#define AIRBUS_DS_P8GR			0x6001  /* Tetra P8GR */
+
 /*
  * RT Systems programming cables for various ham radios
  */

View file

@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */ /* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090 #define QUECTEL_PRODUCT_UC15 0x9090
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c #define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */ /* These Quectel products use Quectel's vendor ID */
@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100 #define TELIT_PRODUCT_ME910 0x1100
#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@ -648,6 +651,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
.reserved = BIT(1) | BIT(3), .reserved = BIT(1) | BIT(3),
}; };
static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(3),
};
static const struct option_blacklist_info telit_le910_blacklist = { static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0), .sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2), .reserved = BIT(1) | BIT(2),
@ -677,6 +685,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
.reserved = BIT(4) | BIT(5), .reserved = BIT(4) | BIT(5),
}; };
static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
.reserved = BIT(1) | BIT(4),
};
static const struct usb_device_id option_ids[] = { static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@ -1181,6 +1193,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist }, .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
.driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
/* Quectel products using Quectel vendor ID */ /* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist }, .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@ -1247,6 +1262,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = (kernel_ulong_t)&telit_me910_blacklist }, .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist }, .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),

View file

@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
 	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x9090)},	/* Sierra Wireless EM7565 QDL */
+	{DEVICE_SWI(0x1199, 0x9091)},	/* Sierra Wireless EM7565 */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		break;
 	case 2:
 		dev_dbg(dev, "NMEA GPS interface found\n");
+		sendsetup = true;
 		break;
 	case 3:
 		dev_dbg(dev, "Modem port found\n");

View file

@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
 	struct stub_priv *priv;
 	struct urb *urb;
 
-	dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+	dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
 
 	while ((priv = stub_priv_pop(sdev))) {
 		urb = priv->urb;
-		dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+		dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+			priv->seqnum);
 		usb_kill_urb(urb);
 
 		kmem_cache_free(stub_priv_cache, priv);

View file

@@ -230,9 +230,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
 		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
 			continue;
 
-		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
-			 priv->urb);
-
 		/*
 		 * This matched urb is not completed yet (i.e., be in
 		 * flight in usb hcd hardware/driver). Now we are
@@ -271,8 +268,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
 		ret = usb_unlink_urb(priv->urb);
 		if (ret != -EINPROGRESS)
 			dev_err(&priv->urb->dev->dev,
-				"failed to unlink a urb %p, ret %d\n",
-				priv->urb, ret);
+				"failed to unlink a urb # %lu, ret %d\n",
+				priv->seqnum, ret);
 
 		return 0;
 	}

View file

@@ -201,8 +201,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
 
 		/* 1. setup usbip_header */
 		setup_ret_submit_pdu(&pdu_header, urb);
-		usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-				  pdu_header.base.seqnum, urb);
+		usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+				  pdu_header.base.seqnum);
 		usbip_header_correct_endian(&pdu_header, 1);
 
 		iov[iovnum].iov_base = &pdu_header;

View file

@ -467,9 +467,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
int ret = 0; int ret = 0;
struct vhci_device *vdev; struct vhci_device *vdev;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
/* patch to usb_sg_init() is in 2.5.60 */ /* patch to usb_sg_init() is in 2.5.60 */
BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length); BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
@ -627,8 +624,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct vhci_priv *priv; struct vhci_priv *priv;
struct vhci_device *vdev; struct vhci_device *vdev;
pr_info("dequeue a urb %p\n", urb);
spin_lock(&the_controller->lock); spin_lock(&the_controller->lock);
priv = urb->hcpriv; priv = urb->hcpriv;
@ -656,7 +651,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* tcp connection is closed */ /* tcp connection is closed */
spin_lock(&vdev->priv_lock); spin_lock(&vdev->priv_lock);
pr_info("device %p seems to be disconnected\n", vdev);
list_del(&priv->list); list_del(&priv->list);
kfree(priv); kfree(priv);
urb->hcpriv = NULL; urb->hcpriv = NULL;
@ -668,8 +662,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* vhci_rx will receive RET_UNLINK and give back the URB. * vhci_rx will receive RET_UNLINK and give back the URB.
* Otherwise, we give back it here. * Otherwise, we give back it here.
*/ */
pr_info("gives back urb %p\n", urb);
usb_hcd_unlink_urb_from_ep(hcd, urb); usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&the_controller->lock); spin_unlock(&the_controller->lock);
@ -698,8 +690,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink->unlink_seqnum = priv->seqnum; unlink->unlink_seqnum = priv->seqnum;
pr_info("device %p seems to be still connected\n", vdev);
/* send cmd_unlink and try to cancel the pending URB in the /* send cmd_unlink and try to cancel the pending URB in the
* peer */ * peer */
list_add_tail(&unlink->list, &vdev->unlink_tx); list_add_tail(&unlink->list, &vdev->unlink_tx);

View file

@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
 		urb = priv->urb;
 		status = urb->status;
-		usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
-				  urb, priv, seqnum);
+		usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
 		switch (status) {
 		case -ENOENT:
 			/* fall through */
 		case -ECONNRESET:
-			dev_info(&urb->dev->dev,
-				 "urb %p was unlinked %ssynchronuously.\n", urb,
-				 status == -ENOENT ? "" : "a");
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u was unlinked %ssynchronuously\n",
+				 seqnum, status == -ENOENT ? "" : "a");
 			break;
 		case -EINPROGRESS:
 			/* no info output */
 			break;
 		default:
-			dev_info(&urb->dev->dev,
-				 "urb %p may be in a error, status %d\n", urb,
-				 status);
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u may be in a error, status %d\n",
+				 seqnum, status);
 		}
 		list_del(&priv->list);
@@ -78,8 +77,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	spin_unlock(&vdev->priv_lock);
 	if (!urb) {
-		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
-		pr_info("max seqnum %d\n",
+		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+			pdu->base.seqnum,
 			atomic_read(&the_controller->seqnum));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
@@ -102,7 +101,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	if (usbip_dbg_flag_vhci_rx)
 		usbip_dump_urb(urb);
-	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 	spin_lock(&the_controller->lock);
 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
@@ -165,7 +164,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		pr_info("the urb (seqnum %d) was already given back\n",
 			pdu->base.seqnum);
 	} else {
-		usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+		usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
 		/* If unlink is successful, status is -ECONNRESET */
 		urb->status = pdu->u.ret_unlink.status;

View file

@@ -82,7 +82,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
 		memset(&msg, 0, sizeof(msg));
 		memset(&iov, 0, sizeof(iov));
-		usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+		usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+				  priv->seqnum);
 		/* 1. setup usbip_header */
 		setup_cmd_submit_pdu(&pdu_header, urb);

View file

@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };

View file

@@ -218,7 +218,8 @@ struct ipv6_pinfo {
 				 * 100: prefer care-of address
 				 */
 				dontfrag:1,
-				autoflowlabel:1;
+				autoflowlabel:1,
+				autoflowlabel_set:1;
 	__u8			min_hopcount;
 	__u8			tclass;
 	__be32			rcv_flowinfo;

View file

@@ -80,10 +80,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #endif
 #endif
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
 		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
 		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif /* CONFIG_SMP */
 		NR_TLB_LOCAL_FLUSH_ALL,
 		NR_TLB_LOCAL_FLUSH_ONE,
 #endif /* CONFIG_DEBUG_TLBFLUSH */

View file

@@ -33,6 +33,8 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
+#define IPV4_MIN_MTU		68			/* RFC 791 */
 struct sock;
 struct inet_skb_parm {

View file

@@ -568,6 +568,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
+static inline bool local_timer_softirq_pending(void)
+{
+	return local_softirq_pending() & TIMER_SOFTIRQ;
+}
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 					 ktime_t now, int cpu)
 {
@@ -584,8 +589,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
-	if (rcu_needs_cpu(basemono, &next_rcu) ||
-	    arch_needs_cpu() || irq_work_needs_cpu()) {
+	/*
+	 * Keep the periodic tick, when RCU, architecture or irq_work
+	 * requests it.
+	 * Aside of that check whether the local timer softirq is
+	 * pending. If so its a bad idea to call get_next_timer_interrupt()
+	 * because there is an already expired timer, so it will request
+	 * immeditate expiry, which rearms the hardware timer with a
+	 * minimal delta which brings us back to this place
+	 * immediately. Lather, rinse and repeat...
+	 */
+	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 		next_tick = basemono + TICK_NSEC;
 	} else {
 		/*

View file

@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 /* Missed count stored at end */
 #define RB_MISSED_STORED	(1 << 30)
+#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
  */
 size_t ring_buffer_page_len(void *page)
 {
-	return local_read(&((struct buffer_data_page *)page)->commit)
+	struct buffer_data_page *bpage = page;
+	return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
 		+ BUF_PAGE_HDR_SIZE;
 }
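
The hunk above keeps the RB_MISSED_* status bits in the high bits of the commit counter, so the reported page length has to mask them off before adding the header size. A minimal standalone sketch of that flags-in-upper-bits pattern; the names below are illustrative, not the kernel's:

    #include <assert.h>
    #include <stdint.h>

    #define MISSED_EVENTS (1u << 31)   /* hypothetical flag bits stored in the */
    #define MISSED_STORED (1u << 30)   /* upper bits of a length/commit field  */
    #define MISSED_FLAGS  (MISSED_EVENTS | MISSED_STORED)

    /* The usable length is only the low bits; flags must not leak into it. */
    static uint32_t page_len(uint32_t commit)
    {
            return commit & ~MISSED_FLAGS;
    }

    int main(void)
    {
            uint32_t commit = 4096u | MISSED_STORED;
            assert(page_len(commit) == 4096u);   /* flag bit masked out */
            return 0;
    }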

View file

@@ -5844,7 +5844,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		.spd_release	= buffer_spd_release,
 	};
 	struct buffer_ref *ref;
-	int entries, size, i;
+	int entries, i;
 	ssize_t ret = 0;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5895,14 +5895,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
-		/*
-		 * zero out any left over data, this is going to
-		 * user land.
-		 */
-		size = ring_buffer_page_len(ref->page);
-		if (size < PAGE_SIZE)
-			memset(ref->page + size, 0, PAGE_SIZE - size);
 		page = virt_to_page(ref->page);
 		spd.pages[i] = page;
@@ -6629,6 +6621,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 	buf->data = alloc_percpu(struct trace_array_cpu);
 	if (!buf->data) {
 		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
 		return -ENOMEM;
 	}
@@ -6652,7 +6645,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 					 allocate_snapshot ? size : 1);
 	if (WARN_ON(ret)) {
 		ring_buffer_free(tr->trace_buffer.buffer);
+		tr->trace_buffer.buffer = NULL;
 		free_percpu(tr->trace_buffer.data);
+		tr->trace_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;

View file

@@ -1067,19 +1067,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
 	struct net_bridge *br = netdev_priv(dev);
 	int err;
+	err = register_netdevice(dev);
+	if (err)
+		return err;
 	if (tb[IFLA_ADDRESS]) {
 		spin_lock_bh(&br->lock);
 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
 		spin_unlock_bh(&br->lock);
 	}
-	err = register_netdevice(dev);
-	if (err)
-		return err;
 	err = br_changelink(dev, tb, data);
 	if (err)
-		unregister_netdevice(dev);
+		br_dev_delete(dev, NULL);
 	return err;
 }

View file

@@ -261,7 +261,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
 	spin_lock_irqsave(&net->nsid_lock, flags);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
-		get_net(peer);
+		peer = maybe_get_net(peer);
 	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	rcu_read_unlock();
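
The change above only takes a reference when the namespace refcount is still non-zero, which is what closes the use-after-free. A rough userspace sketch of that "increment unless already zero" idea using C11 atomics; this is an illustration of the pattern, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
            atomic_int refcount;
    };

    /* Take a reference only if the object is not already being torn down. */
    static bool get_unless_zero(struct obj *o)
    {
            int c = atomic_load(&o->refcount);
            while (c != 0) {
                    if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                            return true;   /* reference taken */
            }
            return false;                  /* already dying, caller must not use it */
    }

    int main(void)
    {
            struct obj dying = { .refcount = 0 };
            struct obj live  = { .refcount = 1 };
            printf("dying: %d, live: %d\n", get_unless_zero(&dying), get_unless_zero(&live));
            return 0;
    }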

View file

@@ -3676,7 +3676,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 	struct sock *sk = skb->sk;
 	if (!skb_may_tx_timestamp(sk, false))
-		return;
+		goto err;
 	/* Take a reference to prevent skb_orphan() from freeing the socket,
 	 * but only if the socket refcount is not zero.
@@ -3685,7 +3685,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
 		*skb_hwtstamps(skb) = *hwtstamps;
 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
 		sock_put(sk);
+		return;
 	}
+err:
+	kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);

View file

@@ -1358,7 +1358,7 @@ skip:
 static bool inetdev_valid_mtu(unsigned int mtu)
 {
-	return mtu >= 68;
+	return mtu >= IPV4_MIN_MTU;
 }
 static void inetdev_send_gratuitous_arp(struct net_device *dev,

View file

@@ -1253,7 +1253,7 @@ fail:
 static void ip_fib_net_exit(struct net *net)
 {
-	unsigned int i;
+	int i;
 	rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1261,7 +1261,12 @@ static void ip_fib_net_exit(struct net *net)
 	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
 	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+	/* Destroy the tables in reverse order to guarantee that the
+	 * local table, ID 255, is destroyed before the main table, ID
+	 * 254. This is necessary as the local table may contain
+	 * references to data contained in the main table.
+	 */
+	for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
 		struct hlist_node *tmp;
 		struct fib_table *tb;

View file

@@ -89,6 +89,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/times.h>
 #include <linux/pkt_sched.h>
+#include <linux/byteorder/generic.h>
 #include <net/net_namespace.h>
 #include <net/arp.h>
@@ -327,6 +328,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
 	return scount;
 }
+/* source address selection per RFC 3376 section 4.2.13 */
+static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+				 const struct flowi4 *fl4)
+{
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
+	if (!in_dev)
+		return htonl(INADDR_ANY);
+	for_ifa(in_dev) {
+		if (inet_ifa_match(fl4->saddr, ifa))
+			return fl4->saddr;
+	} endfor_ifa(in_dev);
+	return htonl(INADDR_ANY);
+}
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
 	struct sk_buff *skb;
@@ -374,7 +392,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 	pip->frag_off = htons(IP_DF);
 	pip->ttl      = 1;
 	pip->daddr    = fl4.daddr;
-	pip->saddr    = fl4.saddr;
+	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
 	ip_select_ident(net, skb, NULL);
@@ -410,16 +428,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
 }
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
-	int type, struct igmpv3_grec **ppgr)
+	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
 {
 	struct net_device *dev = pmc->interface->dev;
 	struct igmpv3_report *pih;
 	struct igmpv3_grec *pgr;
-	if (!skb)
-		skb = igmpv3_newpack(dev, dev->mtu);
+	if (!skb) {
+		skb = igmpv3_newpack(dev, mtu);
 		if (!skb)
 			return NULL;
+	}
 	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
 	pgr->grec_type = type;
 	pgr->grec_auxwords = 0;
@@ -441,12 +460,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 	struct igmpv3_grec *pgr = NULL;
 	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, stotal, first, isquery, truncate;
+	unsigned int mtu;
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
 		return skb;
 	if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
 		return skb;
+	mtu = READ_ONCE(dev->mtu);
+	if (mtu < IPV4_MIN_MTU)
+		return skb;
 	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
 		  type == IGMPV3_MODE_IS_EXCLUDE;
 	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -467,7 +491,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
 			if (skb)
 				igmpv3_sendpack(skb);
-			skb = igmpv3_newpack(dev, dev->mtu);
+			skb = igmpv3_newpack(dev, mtu);
 		}
 	}
 	first = 1;
@@ -494,12 +518,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 				pgr->grec_nsrcs = htons(scount);
 			if (skb)
 				igmpv3_sendpack(skb);
-			skb = igmpv3_newpack(dev, dev->mtu);
+			skb = igmpv3_newpack(dev, mtu);
 			first = 1;
 			scount = 0;
 		}
 		if (first) {
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 			first = 0;
 		}
 		if (!skb)
@@ -533,7 +557,7 @@ empty_source:
 				igmpv3_sendpack(skb);
 				skb = NULL; /* add_grhead will get a new one */
 			}
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 		}
 	}
 	if (pgr)

View file

@@ -400,8 +400,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 		dev->needed_headroom = t_hlen + hlen;
 		mtu -= (dev->hard_header_len + t_hlen);
-		if (mtu < 68)
-			mtu = 68;
+		if (mtu < IPV4_MIN_MTU)
+			mtu = IPV4_MIN_MTU;
 	return mtu;
 }

View file

@@ -500,11 +500,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int err;
 	struct ip_options_data opt_copy;
 	struct raw_frag_vec rfv;
+	int hdrincl;
 	err = -EMSGSIZE;
 	if (len > 0xFFFF)
 		goto out;
+	/* hdrincl should be READ_ONCE(inet->hdrincl)
+	 * but READ_ONCE() doesn't work with bit fields
+	 */
+	hdrincl = inet->hdrincl;
 	/*
 	 *	Check the flags.
 	 */
@@ -579,7 +584,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		/* Linux does not mangle headers on raw sockets,
 		 * so that IP options + IP_HDRINCL is non-sense.
 		 */
-		if (inet->hdrincl)
+		if (hdrincl)
 			goto done;
 		if (ipc.opt->opt.srr) {
 			if (!daddr)
@@ -601,9 +606,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE,
-			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk) |
-			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+			    (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0, sk->sk_uid);
 	if (!saddr && ipc.oif) {
@@ -612,7 +617,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			goto done;
 	}
-	if (!inet->hdrincl) {
+	if (!hdrincl) {
 		rfv.msg = msg;
 		rfv.hlen = 0;
@@ -637,7 +642,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		goto do_confirm;
 back_from_confirm:
-	if (inet->hdrincl)
+	if (hdrincl)
 		err = raw_send_hdrinc(sk, &fl4, msg, len,
 				      &rt, msg->msg_flags);
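
The raw_sendmsg() change above reads inet->hdrincl once into a local so every later test sees the same value, closing the window where a concurrent setsockopt() could flip the bit mid-send. The general snapshot-into-a-local pattern, sketched outside the kernel with made-up names:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A flag another thread may change at any time (think of a setsockopt-style call). */
    static atomic_int hdrincl_flag;

    static void send_packet(void)
    {
            /* Snapshot once; testing hdrincl_flag directly in both branches below
             * could observe two different values and take inconsistent paths. */
            int hdrincl = atomic_load(&hdrincl_flag);

            if (hdrincl)
                    printf("caller supplies the IP header\n");

            /* ... later in the same operation ... */
            if (!hdrincl)
                    printf("kernel builds the IP header\n");
    }

    int main(void)
    {
            atomic_store(&hdrincl_flag, 1);
            send_packet();
            return 0;
    }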

View file

@@ -817,7 +817,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_time_stamp,
 			req->ts_recent,
 			0,
-			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
 					  AF_INET),
 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
 			ip_hdr(skb)->tos);

View file

@@ -216,7 +216,6 @@ lookup_protocol:
 	np->mcast_hops	= IPV6_DEFAULT_MCASTHOPS;
 	np->mc_loop	= 1;
 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
-	np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
 	/* Init the ipv4 part of the socket since we can have sockets

View file

@@ -148,6 +148,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+	if (!np->autoflowlabel_set)
+		return ip6_default_np_autolabel(net);
+	else
+		return np->autoflowlabel;
+}
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -211,7 +219,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 		hlimit = ip6_dst_hoplimit(dst);
 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-						     np->autoflowlabel, fl6));
+				ip6_autoflowlabel(net, np), fl6));
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -1675,7 +1683,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 	ip6_flow_hdr(hdr, v6_cork->tclass,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-					np->autoflowlabel, fl6));
+				ip6_autoflowlabel(net, np), fl6));
 	hdr->hop_limit = v6_cork->hop_limit;
 	hdr->nexthdr = proto;
 	hdr->saddr = fl6->saddr;
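
The new ip6_autoflowlabel() above returns the per-netns sysctl default unless the socket has explicitly set IPV6_AUTOFLOWLABEL, which the new autoflowlabel_set bit records. A compact sketch of that "explicit value or inherited default" pattern, with illustrative types and names rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock_opts {
            unsigned int autoflowlabel:1;      /* value chosen by the application      */
            unsigned int autoflowlabel_set:1;  /* has the application chosen it at all? */
    };

    /* Per-"namespace" default, normally driven by a sysctl. */
    static bool default_autoflowlabel = true;

    static bool effective_autoflowlabel(const struct sock_opts *so)
    {
            return so->autoflowlabel_set ? so->autoflowlabel : default_autoflowlabel;
    }

    int main(void)
    {
            struct sock_opts untouched = { 0 };                      /* follows the default  */
            struct sock_opts opted_out = { .autoflowlabel = 0,
                                           .autoflowlabel_set = 1 }; /* explicit user choice */
            printf("%d %d\n", effective_autoflowlabel(&untouched),
                   effective_autoflowlabel(&opted_out));
            return 0;
    }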

View file

@@ -872,6 +872,7 @@ pref_skip_coa:
 		break;
 	case IPV6_AUTOFLOWLABEL:
 		np->autoflowlabel = valbool;
+		np->autoflowlabel_set = 1;
 		retv = 0;
 		break;
 	}

View file

@@ -1668,16 +1668,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
 }
 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
-	int type, struct mld2_grec **ppgr)
+	int type, struct mld2_grec **ppgr, unsigned int mtu)
 {
-	struct net_device *dev = pmc->idev->dev;
 	struct mld2_report *pmr;
 	struct mld2_grec *pgr;
-	if (!skb)
-		skb = mld_newpack(pmc->idev, dev->mtu);
+	if (!skb) {
+		skb = mld_newpack(pmc->idev, mtu);
 		if (!skb)
 			return NULL;
+	}
 	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
 	pgr->grec_type = type;
 	pgr->grec_auxwords = 0;
@@ -1700,10 +1700,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	struct mld2_grec *pgr = NULL;
 	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
 	int scount, stotal, first, isquery, truncate;
+	unsigned int mtu;
 	if (pmc->mca_flags & MAF_NOREPORT)
 		return skb;
+	mtu = READ_ONCE(dev->mtu);
+	if (mtu < IPV6_MIN_MTU)
+		return skb;
 	isquery = type == MLD2_MODE_IS_INCLUDE ||
 		  type == MLD2_MODE_IS_EXCLUDE;
 	truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1724,7 +1729,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(idev, dev->mtu);
+			skb = mld_newpack(idev, mtu);
 		}
 	}
 	first = 1;
@@ -1751,12 +1756,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 				pgr->grec_nsrcs = htons(scount);
 			if (skb)
 				mld_sendpack(skb);
-			skb = mld_newpack(idev, dev->mtu);
+			skb = mld_newpack(idev, mtu);
 			first = 1;
 			scount = 0;
 		}
 		if (first) {
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 			first = 0;
 		}
 		if (!skb)
@@ -1790,7 +1795,7 @@ empty_source:
 				mld_sendpack(skb);
 				skb = NULL; /* add_grhead will get a new one */
 			}
-			skb = add_grhead(skb, pmc, type, &pgr);
+			skb = add_grhead(skb, pmc, type, &pgr, mtu);
 		}
 	}
 	if (pgr)

View file

@@ -951,7 +951,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
-			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
 			0, 0);
 }

View file

@@ -261,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 	struct sock *sk = skb->sk;
 	int ret = -ENOMEM;
+	if (!net_eq(dev_net(dev), sock_net(sk)))
+		return 0;
 	dev_hold(dev);
 	if (is_vmalloc_addr(skb->head))

View file

@@ -4153,7 +4153,7 @@ static int sctp_init_sock(struct sock *sk)
 	SCTP_DBG_OBJCNT_INC(sock);
 	local_bh_disable();
-	percpu_counter_inc(&sctp_sockets_allocated);
+	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 	/* Nothing can fail after this block, otherwise
@@ -4197,7 +4197,7 @@ static void sctp_destroy_sock(struct sock *sk)
 	}
 	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
-	percpu_counter_dec(&sctp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }

View file

@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
 	return 0;
 }
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+				     struct snd_rawmidi_info *info)
 {
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_str *pstr;
 	struct snd_rawmidi_substream *substream;
-	mutex_lock(&register_mutex);
 	rmidi = snd_rawmidi_search(card, info->device);
-	mutex_unlock(&register_mutex);
 	if (!rmidi)
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
 	}
 	return -ENXIO;
 }
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+	int ret;
+	mutex_lock(&register_mutex);
+	ret = __snd_rawmidi_info_select(card, info);
+	mutex_unlock(&register_mutex);
+	return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 static int snd_rawmidi_info_select_user(struct snd_card *card,
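
The rawmidi change above keeps register_mutex held across both the lookup and the use of the found device, by splitting the work into an unlocked __helper plus a small locked wrapper. The same wrapper shape in plain pthreads, with hypothetical names; a sketch of the pattern, not the ALSA code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static const char *registry_entry = "midi0";   /* stand-in for the device list */

    /* Caller must hold registry_lock: lookup and use stay in one critical section. */
    static int __query_device(const char *name)
    {
            if (!registry_entry || !name || name[0] != registry_entry[0])
                    return -1;
            printf("found %s\n", registry_entry);
            return 0;
    }

    /* Public entry point: take the lock around the whole operation, not just the lookup. */
    static int query_device(const char *name)
    {
            int ret;

            pthread_mutex_lock(&registry_lock);
            ret = __query_device(name);
            pthread_mutex_unlock(&registry_lock);
            return ret;
    }

    int main(void)
    {
            return query_device("midi0") ? 1 : 0;
    }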

View file

@@ -183,7 +183,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
  */
 int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
 {
-	if (WARN_ON(!hdac_acomp))
+	if (!hdac_acomp)
 		return -ENODEV;
 	hdac_acomp->audio_ops = aops;

View file

@@ -5953,6 +5953,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x1b, 0x01011020},
 		{0x21, 0x02211010}),
+	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60130},
+		{0x14, 0x90170110},
+		{0x1b, 0x01011020},
+		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},

View file

@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
 	struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
 	struct device_node *twl4030_codec_node = NULL;
-	twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
+	twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
 						  "codec");
 	if (!pdata && twl4030_codec_node) {
@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
 				     GFP_KERNEL);
 		if (!pdata) {
 			dev_err(codec->dev, "Can not allocate memory\n");
+			of_node_put(twl4030_codec_node);
 			return NULL;
 		}
 		twl4030_setup_pdata_of(pdata, twl4030_codec_node);
+		of_node_put(twl4030_codec_node);
 	}
 	return pdata;

View file

@@ -1408,12 +1408,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 		       sizeof(fsl_ssi_ac97_dai));
 		fsl_ac97_data = ssi_private;
-		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
-		if (ret) {
-			dev_err(&pdev->dev, "could not set AC'97 ops\n");
-			return ret;
-		}
 	} else {
 		/* Initialize this copy of the CPU DAI driver structure */
 		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1473,6 +1467,14 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 			return ret;
 	}
+	if (fsl_ssi_is_ac97(ssi_private)) {
+		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		if (ret) {
+			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			goto error_ac97_ops;
+		}
+	}
 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
 					      &ssi_private->cpu_dai_drv, 1);
 	if (ret) {
@@ -1556,6 +1558,10 @@ error_sound_card:
 	fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
 error_asoc_register:
+	if (fsl_ssi_is_ac97(ssi_private))
+		snd_soc_set_ac97_ops(NULL);
+error_ac97_ops:
 	if (ssi_private->soc->imx)
 		fsl_ssi_imx_clean(pdev, ssi_private);

View file

@@ -2101,20 +2101,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	kctl->private_value = (unsigned long)namelist;
 	kctl->private_free = usb_mixer_selector_elem_free;
-	nameid = uac_selector_unit_iSelector(desc);
+	/* check the static mapping table at first */
 	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-	if (len)
-		;
-	else if (nameid)
-		len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-					 sizeof(kctl->id.name));
-	else
-		len = get_term_name(state, &state->oterm,
-				    kctl->id.name, sizeof(kctl->id.name), 0);
 	if (!len) {
-		strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+		/* no mapping ? */
+		/* if iSelector is given, use it */
+		nameid = uac_selector_unit_iSelector(desc);
+		if (nameid)
+			len = snd_usb_copy_string_desc(state, nameid,
+						       kctl->id.name,
+						       sizeof(kctl->id.name));
+		/* ... or pick up the terminal name at next */
+		if (!len)
+			len = get_term_name(state, &state->oterm,
+				    kctl->id.name, sizeof(kctl->id.name), 0);
+		/* ... or use the fixed string "USB" as the last resort */
+		if (!len)
+			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+		/* and add the proper suffix */
 		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
 			append_ctl_name(kctl, " Clock Source");
 		else if ((state->oterm.type & 0xff00) == 0x0100)

View file

@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
 	char command[SYSFS_BUS_ID_SIZE + 4];
 	char match_busid_attr_path[SYSFS_PATH_MAX];
 	int rc;
+	int cmd_size;
 	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
 		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
 		 attr_name);
 	if (add)
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+				    busid);
 	else
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+				    busid);
 	rc = write_sysfs_attribute(match_busid_attr_path, command,
-				   sizeof(command));
+				   cmd_size);
 	if (rc < 0) {
 		dbg("failed to write match_busid: %s", strerror(errno));
 		return -1;