This is the 4.4.1 stable release
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABCAAGBQJWrmCnAAoJEDjbvchgkmk+jWUQAJPiYhXW2O2PlyZ0Fqoy7aZd
WCi3bPHMU6ALipqaI3Alps36lteYtRGhRMglgZSiuHZKGIsyV48xYIHU7H99r69g
hupoFIRPc/Nx9WPhtHT6l/k+Jaj+KCNvOH8oZbLc6s3GpGyzFDjrXbeJdnPrlgPu
p04o9ES0kozI0rWWDWErBNHPJWjgWqI5uGW9epcmQ4zBVGeg2eWglHpbria4wnqq
MvtFHqB26BvZxVx8UcETSCl1nWHKftYYtiowC89dW0LfhWdu/OxxdbtraDv48uFI
DNzDg27gzFS8cj9TxSsVz1okA7fu4j6EHhnok78KMNO2BkN9ipIJOhnd97fOGI4s
O5FwKVNw+QDKuuw3GwtP9+n0ybuoCAdOExeQLlyzHrX1ExzUk/GnH/nuWm6eoqUX
ZujUvDIFZ3STPZNdxoJzkuMM6Gd9VULstECYLryCn3ZdJkhaSCboGjGPrp+rzvHc
YcqfHkUk7hwbe7qEVGINJCaCwVgMZjeC/xtE0UZfE7F3EhfS0SSUNHNXEQL4svB4
4TVIl9wqXhN7pjGNcwpunZ1YsrCrTldKAkbf4eM2TA2SOiMXvmflRgl1DIbFgmZU
rTwMECluveaGWI9Uy0ol4+gQaL+v3avOpR6e4R9FK6N5uslr+dnutWOhLnUEgGt/
rmZYFZz0mZiWI6CHWPpG
=Olxb
-----END PGP SIGNATURE-----

Merge tag 'v4.4.1' into linux-linaro-lsk-v4.4

This is the 4.4.1 stable release

# gpg: Signature made Sun 31 Jan 2016 19:29:43 GMT using RSA key ID 6092693E
# gpg: Good signature from "Greg Kroah-Hartman (Linux kernel stable release signing key) <greg@kroah.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 647F 2865 4894 E3BD 4571 99BE 38DB BDC8 6092 693E
commit 34f6d2c9d1
80 changed files with 655 additions and 291 deletions
Documentation/ABI/testing/sysfs-bus-usb
@@ -134,19 +134,21 @@ Description:
 		enabled for the device. Developer can write y/Y/1 or n/N/0 to
 		the file to enable/disable the feature.
 
-What:		/sys/bus/usb/devices/.../power/usb3_hardware_lpm
-Date:		June 2015
+What:		/sys/bus/usb/devices/.../power/usb3_hardware_lpm_u1
+		/sys/bus/usb/devices/.../power/usb3_hardware_lpm_u2
+Date:		November 2015
 Contact:	Kevin Strasser <kevin.strasser@linux.intel.com>
 		Lu Baolu <baolu.lu@linux.intel.com>
 Description:
 		If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
 		in to a xHCI host which supports link PM, it will check if U1
 		and U2 exit latencies have been set in the BOS descriptor; if
-		the check is is passed and the host supports USB3 hardware LPM,
+		the check is passed and the host supports USB3 hardware LPM,
 		USB3 hardware LPM will be enabled for the device and the USB
-		device directory will contain a file named
-		power/usb3_hardware_lpm. The file holds a string value (enable
-		or disable) indicating whether or not USB3 hardware LPM is
-		enabled for the device.
+		device directory will contain two files named
+		power/usb3_hardware_lpm_u1 and power/usb3_hardware_lpm_u2. These
+		files hold a string value (enable or disable) indicating whether
+		or not USB3 hardware LPM U1 or U2 is enabled for the device.
 
 What:		/sys/bus/usb/devices/.../removable
 Date:		February 2012
@@ -537,17 +537,18 @@ relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
 	can write y/Y/1 or n/N/0 to the file to enable/disable
 	USB2 hardware LPM manually. This is for test purpose mainly.
 
-	power/usb3_hardware_lpm
+	power/usb3_hardware_lpm_u1
+	power/usb3_hardware_lpm_u2
 
 	When a USB 3.0 lpm-capable device is plugged in to a
 	xHCI host which supports link PM, it will check if U1
 	and U2 exit latencies have been set in the BOS
 	descriptor; if the check is is passed and the host
 	supports USB3 hardware LPM, USB3 hardware LPM will be
-	enabled for the device and this file will be created.
-	The file holds a string value (enable or disable)
-	indicating whether or not USB3 hardware LPM is
-	enabled for the device.
+	enabled for the device and these files will be created.
+	The files hold a string value (enable or disable)
+	indicating whether or not USB3 hardware LPM U1 or U2
+	is enabled for the device.
 
 	USB Port Power Control
 	----------------------
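The two attributes documented above are ordinary sysfs text files. A minimal userspace sketch for reading them follows; the device path "2-1" is an assumption, substitute any SuperSpeed device found under /sys/bus/usb/devices:

	/* Sketch: read the new usb3_hardware_lpm_u1/u2 attributes. */
	#include <stdio.h>

	static void show(const char *path)
	{
		char buf[16];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);	/* "enable" or "disable" */
		fclose(f);
	}

	int main(void)
	{
		/* hypothetical device path -- adjust for your system */
		show("/sys/bus/usb/devices/2-1/power/usb3_hardware_lpm_u1");
		show("/sys/bus/usb/devices/2-1/power/usb3_hardware_lpm_u2");
		return 0;
	}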
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 0
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -512,9 +512,14 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 #endif
 
 	/* EL2 debug */
+	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	x0, x0, #8, #4
+	cmp	x0, #1
+	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
 	msr	mdcr_el2, x0			// all PMU counters from EL1
+4:
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
@@ -574,9 +574,6 @@ static void armv8pmu_reset(void *info)
 
 	/* Initialize & Reset PMNC: C and P bits. */
 	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-
-	/* Disable access from userspace. */
-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
@@ -58,6 +58,12 @@
  */
 void ptrace_disable(struct task_struct *child)
 {
+	/*
+	 * This would be better off in core code, but PTRACE_DETACH has
+	 * grown its fair share of arch-specific worts and changing it
+	 * is likely to cause regressions on obscure architectures.
+	 */
+	user_disable_single_step(child);
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -456,6 +456,9 @@ void __init paging_init(void)
 
 	empty_zero_page = virt_to_page(zero_page);
 
+	/* Ensure the zero page is visible to the page table walker */
+	dsb(ishst);
+
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
@@ -62,3 +62,15 @@
 	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 #endif
 	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
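For readers unfamiliar with sbfx, a standalone C sketch (illustrative only, not kernel code) of what the PMUVer check in this macro computes -- a signed 4-bit field at bit 8 of ID_AA64DFR0_EL1, where 0xf ("IMPLEMENTATION DEFINED") sign-extends to -1 so the "cmp #1; b.lt" skips it along with 0x0:

	#include <assert.h>
	#include <stdint.h>

	/* sign-extended bitfield extract, like AArch64 sbfx */
	static int64_t sbfx(uint64_t reg, unsigned lsb, unsigned width)
	{
		/* shift the field to the top, arithmetic-shift back down */
		return (int64_t)(reg << (64 - lsb - width)) >> (64 - width);
	}

	int main(void)
	{
		assert(sbfx(0x1ULL << 8, 8, 4) == 1);	/* PMUv3: not skipped */
		assert(sbfx(0x0ULL << 8, 8, 4) == 0);	/* no PMU: skipped */
		assert(sbfx(0xfULL << 8, 8, 4) == -1);	/* impdef: skipped */
		return 0;
	}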
@@ -117,6 +117,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh		// Make sure local tlb invalidation completed
 	isb
@@ -155,6 +156,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)
@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
 	stdcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)
@@ -44,7 +44,7 @@ static inline void isync(void)
 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER	"\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER	stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
@@ -295,6 +295,8 @@ do {							\
 #define R_PPC64_TLSLD		108
 #define R_PPC64_TOCSAVE		109
 
+#define R_PPC64_ENTRY		118
+
 #define R_PPC64_REL16		249
 #define R_PPC64_REL16_LO	250
 #define R_PPC64_REL16_HI	251
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			 */
 			break;
 
+		case R_PPC64_ENTRY:
+			/*
+			 * Optimize ELFv2 large code model entry point if
+			 * the TOC is within 2GB range of current location.
+			 */
+			value = my_r2(sechdrs, me) - (unsigned long)location;
+			if (value + 0x80008000 > 0xffffffff)
+				break;
+			/*
+			 * Check for the large code model prolog sequence:
+			 *	ld r2, ...(r12)
+			 *	add r2, r2, r12
+			 */
+			if ((((uint32_t *)location)[0] & ~0xfffc)
+			    != 0xe84c0000)
+				break;
+			if (((uint32_t *)location)[1] != 0x7c426214)
+				break;
+			/*
+			 * If found, replace it with:
+			 *	addis r2, r12, (.TOC.-func)@ha
+			 *	addi  r2, r12, (.TOC.-func)@l
+			 */
+			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+			break;
+
 		case R_PPC64_REL16_HA:
 			/* Subtract location pointer */
 			value -= (unsigned long)location;
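The magic constants matched and written by this relocation handler are PPC64 instruction encodings. A small self-contained sketch (illustrative only) that derives them from the D/DS-form field layout -- opcode in the top 6 bits, then RT and RA in 5 bits each:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t d_form(unsigned op, unsigned rt, unsigned ra, uint16_t imm)
	{
		return ((uint32_t)op << 26) | (rt << 21) | (ra << 16) | imm;
	}

	int main(void)
	{
		/* ld r2, 0(r12): opcode 58; matched with "& ~0xfffc" above so
		 * any displacement is accepted. */
		assert(d_form(58, 2, 12, 0) == 0xe84c0000);
		/* the replacement pair: addis r2, r12, ...@ha / addi r2, r12, ...@l */
		assert(d_form(15, 2, 12, 0) == 0x3c4c0000);
		assert(d_form(14, 2, 12, 0) == 0x38420000);
		return 0;
	}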
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
 	}
 
+	/*
+	 * Use the current MSR TM suspended bit to track if we have
+	 * checkpointed state outstanding.
+	 * On signal delivery, we'd normally reclaim the checkpointed
+	 * state to obtain stack pointer (see:get_tm_stackpointer()).
+	 * This will then directly return to userspace without going
+	 * through __switch_to(). However, if the stack frame is bad,
+	 * we need to exit this thread which calls __switch_to() which
+	 * will again attempt to reclaim the already saved tm state.
+	 * Hence we need to check that we've not already reclaimed
+	 * this state.
+	 * We do this using the current MSR, rather tracking it in
+	 * some specific thread_struct bit, as it has the additional
+	 * benifit of checking for a potential TM bad thing exception.
+	 */
+	if (!MSR_TM_SUSPENDED(mfmsr()))
+		return;
+
 	/*
 	 * Use the current MSR TM suspended bit to track if we have
 	 * checkpointed state outstanding.
@@ -27,7 +27,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#define BOOT_HEAP_SIZE	0x8000
+#define BOOT_HEAP_SIZE	0x10000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
@@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
-		/* Re-load page tables */
+		/*
+		 * Re-load page tables.
+		 *
+		 * This logic has an ordering constraint:
+		 *
+		 *  CPU 0: Write to a PTE for 'next'
+		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+		 *  CPU 1: set bit 1 in next's mm_cpumask
+		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+		 *
+		 * We need to prevent an outcome in which CPU 1 observes
+		 * the new PTE value and CPU 0 observes bit 1 clear in
+		 * mm_cpumask.  (If that occurs, then the IPI will never
+		 * be sent, and CPU 0's TLB will contain a stale entry.)
+		 *
+		 * The bad outcome can occur if either CPU's load is
+		 * reordered before that CPU's store, so both CPUs must
+		 * execute full barriers to prevent this from happening.
+		 *
+		 * Thus, switch_mm needs a full barrier between the
+		 * store to mm_cpumask and any operation that could load
+		 * from next->pgd.  TLB fills are special and can happen
+		 * due to instruction fetches or for no reason at all,
+		 * and neither LOCK nor MFENCE orders them.
+		 * Fortunately, load_cr3() is serializing and gives the
+		 * ordering guarantee we need.
+		 *
+		 */
 		load_cr3(next->pgd);
 
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 
 		/* Stop flush ipis for the previous mm */
@@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		 * schedule, protecting us from simultaneous changes.
 		 */
 		cpumask_set_cpu(cpu, mm_cpumask(next));
+
 		/*
 		 * We were in lazy tlb mode and leave_mm disabled
 		 * tlb flush IPI delivery. We must reload CR3
 		 * to make sure to use no freed page tables.
+		 *
+		 * As above, load_cr3() is serializing and orders TLB
+		 * fills with respect to the mm_cpumask write.
 		 */
 		load_cr3(next->pgd);
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
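The ordering bug that comment describes has a classic userspace analogy (store buffering). The sketch below is not from the patch; it shows two threads that each store to their own flag and then load the other's with relaxed ordering, so both can observe 0 -- the "IPI never sent" outcome. Making the accesses seq_cst (a full barrier, playing the role of the serializing load_cr3()) rules that result out:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int x, y;
	static int r0, r1;

	static void *t0(void *arg)
	{
		atomic_store_explicit(&x, 1, memory_order_relaxed);  /* "write PTE"      */
		r0 = atomic_load_explicit(&y, memory_order_relaxed); /* "read mm_cpumask" */
		return arg;
	}

	static void *t1(void *arg)
	{
		atomic_store_explicit(&y, 1, memory_order_relaxed);  /* "set mask bit" */
		r1 = atomic_load_explicit(&x, memory_order_relaxed); /* "read PTE"     */
		return arg;
	}

	int main(void)
	{
		int seen = 0;

		for (int i = 0; i < 1000000 && !seen; i++) {
			pthread_t a, b;

			atomic_store(&x, 0);
			atomic_store(&y, 0);
			pthread_create(&a, NULL, t0, NULL);
			pthread_create(&b, NULL, t1, NULL);
			pthread_join(a, NULL);
			pthread_join(b, NULL);
			seen = (r0 == 0 && r1 == 0);	/* the bad outcome */
		}
		printf("bad outcome observed: %s\n", seen ? "yes" : "no");
		return 0;
	}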
@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the iMac10,1. */
+		.callback = set_pci_reboot,
+		.ident = "Apple iMac10,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
+		},
+	},
 
 	/* ASRock */
 	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
@@ -268,7 +268,7 @@ TRACE_EVENT(kvm_inj_virq,
 #define kvm_trace_sym_exc						\
 	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
 	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
-	EXS(MF), EXS(MC)
+	EXS(MF), EXS(AC), EXS(MC)
 
 /*
  * Tracepoint for kvm interrupt injection:
@@ -8932,7 +8932,8 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 			best->ebx &= ~bit(X86_FEATURE_INVPCID);
 	}
 
-	vmcs_set_secondary_exec_control(secondary_exec_ctl);
+	if (cpu_has_secondary_exec_ctrls())
+		vmcs_set_secondary_exec_control(secondary_exec_ctl);
 
 	if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
 		if (guest_cpuid_has_pcommit(vcpu))
@@ -951,7 +951,7 @@ static u32 msrs_to_save[] = {
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
-	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
+	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
 };
 
 static unsigned num_msrs_to_save;
@@ -4006,16 +4006,17 @@ static void kvm_init_msr_list(void)
 
 		/*
 		 * Even MSRs that are valid in the host may not be exposed
-		 * to the guests in some cases. We could work around this
-		 * in VMX with the generic MSR save/load machinery, but it
-		 * is not really worthwhile since it will really only
-		 * happen with nested virtualization.
+		 * to the guests in some cases.
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
 			if (!kvm_x86_ops->mpx_supported())
 				continue;
 			break;
+		case MSR_TSC_AUX:
+			if (!kvm_x86_ops->rdtscp_supported())
+				continue;
+			break;
 		default:
 			break;
 		}
@@ -161,7 +161,10 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+
+	/* This is an implicit full barrier that synchronizes with switch_mm. */
 	local_flush_tlb();
+
 	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
@@ -188,17 +191,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
-	if (current->active_mm != mm)
+	if (current->active_mm != mm) {
+		/* Synchronize with switch_mm. */
+		smp_mb();
+
 		goto out;
+	}
 
 	if (!current->mm) {
 		leave_mm(smp_processor_id());
+
+		/* Synchronize with switch_mm. */
+		smp_mb();
+
 		goto out;
 	}
 
 	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
 		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
+	/*
+	 * Both branches below are implicit full barriers (MOV to CR or
+	 * INVLPG) that synchronize with switch_mm.
+	 */
 	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
 		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
@@ -228,10 +243,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 	preempt_disable();
 
 	if (current->active_mm == mm) {
-		if (current->mm)
+		if (current->mm) {
+			/*
+			 * Implicit full barrier (INVLPG) that synchronizes
+			 * with switch_mm.
+			 */
 			__flush_tlb_one(start);
-		else
+		} else {
 			leave_mm(smp_processor_id());
+
+			/* Synchronize with switch_mm. */
+			smp_mb();
+		}
 	}
 
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
@@ -34,7 +34,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
 	int cpu;
-	xen_hvm_init_shared_info();
+	if (!suspend_cancelled)
+		xen_hvm_init_shared_info();
 	xen_callback_vector();
 	xen_unplug_emulated_devices();
 	if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
@@ -756,7 +756,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	int uninitialized_var(index);
 	int uninitialized_var(inlen);
 	int cqe_size;
-	int irqn;
+	unsigned int irqn;
 	int eqn;
 	int err;
 
@@ -1207,7 +1207,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
 	err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
 	if (err)
 		return err;
-	slave_dev->flags |= IFF_SLAVE;
 	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 	return 0;
 }
@@ -1465,6 +1464,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		}
 	}
 
+	/* set slave flag before open to prevent IPv6 addrconf */
+	slave_dev->flags |= IFF_SLAVE;
+
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
@@ -1725,6 +1727,7 @@ err_close:
 	dev_close(slave_dev);
 
 err_restore_mac:
+	slave_dev->flags &= ~IFF_SLAVE;
 	if (!bond->params.fail_over_mac ||
 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
 		/* XXX TODO - fom follow mode needs to change master's
@@ -746,7 +746,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 	int eqn_not_used;
-	int irqn;
+	unsigned int irqn;
 	int err;
 	u32 i;
 
@@ -800,7 +800,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	void *in;
 	void *cqc;
 	int inlen;
-	int irqn_not_used;
+	unsigned int irqn_not_used;
 	int eqn;
 	int err;
 
@@ -1504,7 +1504,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 	int eqn_not_used;
-	int irqn;
+	unsigned int irqn;
 	int err;
 
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -568,7 +568,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
 		mlx5_irq_clear_affinity_hint(mdev, i);
 }
 
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+		    unsigned int *irqn)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
@@ -2107,7 +2107,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
 		dd = &lp->tx_descs[lp->tx_next];
 
 		/* Set DMA Descriptor fields */
-		dd->des0 = dma_handle;
+		dd->des0 = dma_handle + consumed_size;
 		dd->des1 = 0;
 		dd->des2 = dma_size;
 
@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(port, &team->port_list, list)
+	mutex_lock(&team->lock);
+	list_for_each_entry(port, &team->port_list, list)
 		vlan_vid_del(port->dev, proto, vid);
-	rcu_read_unlock();
+	mutex_unlock(&team->lock);
 
 	return 0;
 }
@@ -2751,7 +2751,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 			       struct vxlan_config *conf)
 {
 	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
 	struct vxlan_rdst *dst = &vxlan->default_dst;
 	unsigned short needed_headroom = ETH_HLEN;
 	int err;
@@ -2817,9 +2817,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	if (!vxlan->cfg.age_interval)
 		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
 
-	if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
-			   vxlan->cfg.dst_port, vxlan->flags))
+	list_for_each_entry(tmp, &vn->vxlan_list, next) {
+		if (tmp->cfg.vni == conf->vni &&
+		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
+		     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
+		    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
+		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
+			(vxlan->flags & VXLAN_F_RCV_FLAGS))
 		return -EEXIST;
+	}
 
 	dev->ethtool_ops = &vxlan_ethtool_ops;
 
@@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
 			ieee80211_rx(hw, skb);
 		else
 			dev_kfree_skb_any(skb);
+	} else {
+		dev_kfree_skb_any(skb);
 	}
 }
 
@@ -3895,17 +3895,30 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
 		return;
 	}
 
-	if (usb_set_lpm_timeout(udev, state, timeout))
+	if (usb_set_lpm_timeout(udev, state, timeout)) {
 		/* If we can't set the parent hub U1/U2 timeout,
 		 * device-initiated LPM won't be allowed either, so let the xHCI
 		 * host know that this link state won't be enabled.
 		 */
 		hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+	} else {
+		/* Only a configured device will accept the Set Feature
+		 * U1/U2_ENABLE
+		 */
+		if (udev->actconfig)
+			usb_set_device_initiated_lpm(udev, state, true);
 
-	/* Only a configured device will accept the Set Feature U1/U2_ENABLE */
-	else if (udev->actconfig)
-		usb_set_device_initiated_lpm(udev, state, true);
+		/* As soon as usb_set_lpm_timeout(timeout) returns 0, the
+		 * hub-initiated LPM is enabled. Thus, LPM is enabled no
+		 * matter the result of usb_set_device_initiated_lpm().
+		 * The only difference is whether device is able to initiate
+		 * LPM.
+		 */
+		if (state == USB3_LPM_U1)
+			udev->usb3_lpm_u1_enabled = 1;
+		else if (state == USB3_LPM_U2)
+			udev->usb3_lpm_u2_enabled = 1;
+	}
 }
 
 /*
@@ -3945,6 +3958,18 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
 		dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
 				"bus schedule bandwidth may be impacted.\n",
 				usb3_lpm_names[state]);
+
+	/* As soon as usb_set_lpm_timeout(0) return 0, hub initiated LPM
+	 * is disabled. Hub will disallows link to enter U1/U2 as well,
+	 * even device is initiating LPM. Hence LPM is disabled if hub LPM
+	 * timeout set to 0, no matter device-initiated LPM is disabled or
+	 * not.
+	 */
+	if (state == USB3_LPM_U1)
+		udev->usb3_lpm_u1_enabled = 0;
+	else if (state == USB3_LPM_U2)
+		udev->usb3_lpm_u2_enabled = 0;
+
 	return 0;
 }
 
@@ -3979,8 +4004,6 @@ int usb_disable_lpm(struct usb_device *udev)
 	if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
 		goto enable_lpm;
 
-	udev->usb3_lpm_enabled = 0;
-
 	return 0;
 
 enable_lpm:
@@ -4038,8 +4061,6 @@ void usb_enable_lpm(struct usb_device *udev)
 
 	usb_enable_link_state(hcd, udev, USB3_LPM_U1);
 	usb_enable_link_state(hcd, udev, USB3_LPM_U2);
-
-	udev->usb3_lpm_enabled = 1;
 }
 EXPORT_SYMBOL_GPL(usb_enable_lpm);
 
@@ -531,7 +531,7 @@ static ssize_t usb2_lpm_besl_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(usb2_lpm_besl);
 
-static ssize_t usb3_hardware_lpm_show(struct device *dev,
+static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
 {
 	struct usb_device *udev = to_usb_device(dev);
@@ -539,7 +539,7 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
 
 	usb_lock_device(udev);
 
-	if (udev->usb3_lpm_enabled)
+	if (udev->usb3_lpm_u1_enabled)
 		p = "enabled";
 	else
 		p = "disabled";
@@ -548,7 +548,26 @@ static ssize_t usb3_hardware_lpm_show(struct device *dev,
 
 	return sprintf(buf, "%s\n", p);
 }
-static DEVICE_ATTR_RO(usb3_hardware_lpm);
+static DEVICE_ATTR_RO(usb3_hardware_lpm_u1);
+
+static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	const char *p;
+
+	usb_lock_device(udev);
+
+	if (udev->usb3_lpm_u2_enabled)
+		p = "enabled";
+	else
+		p = "disabled";
+
+	usb_unlock_device(udev);
+
+	return sprintf(buf, "%s\n", p);
+}
+static DEVICE_ATTR_RO(usb3_hardware_lpm_u2);
 
 static struct attribute *usb2_hardware_lpm_attr[] = {
 	&dev_attr_usb2_hardware_lpm.attr,
@@ -562,7 +581,8 @@ static struct attribute_group usb2_hardware_lpm_attr_group = {
 };
 
 static struct attribute *usb3_hardware_lpm_attr[] = {
-	&dev_attr_usb3_hardware_lpm.attr,
+	&dev_attr_usb3_hardware_lpm_u1.attr,
+	&dev_attr_usb3_hardware_lpm_u2.attr,
 	NULL,
 };
 static struct attribute_group usb3_hardware_lpm_attr_group = {
@@ -592,7 +612,8 @@ static int add_power_attributes(struct device *dev)
 		if (udev->usb2_hw_lpm_capable == 1)
 			rc = sysfs_merge_group(&dev->kobj,
 					&usb2_hardware_lpm_attr_group);
-		if (udev->lpm_capable == 1)
+		if (udev->speed == USB_SPEED_SUPER &&
+				udev->lpm_capable == 1)
 			rc = sysfs_merge_group(&dev->kobj,
 					&usb3_hardware_lpm_attr_group);
 	}
@@ -5059,6 +5059,10 @@ static int __init xhci_hcd_init(void)
 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
 	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+
+	if (usb_disabled())
+		return -ENODEV;
+
 	return 0;
 }
 
@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
 	{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
@@ -45,7 +45,7 @@ struct mlx5_core_cq {
 	atomic_t		refcount;
 	struct completion	free;
 	unsigned		vector;
-	int			irqn;
+	unsigned int		irqn;
 	void (*comp)		(struct mlx5_core_cq *);
 	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
 	struct mlx5_uar	       *uar;
@@ -303,7 +303,7 @@ struct mlx5_eq {
 	u32			cons_index;
 	struct mlx5_buf		buf;
 	int			size;
-	u8			irqn;
+	unsigned int		irqn;
 	u8			eqn;
 	int			nent;
 	u64			mask;
@@ -762,7 +762,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+		    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -830,6 +830,7 @@ struct user_struct {
 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
 #endif
 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
+	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
 
 #ifdef CONFIG_KEYS
 	struct key *uid_keyring;	/* UID specific keyring */
@@ -3446,7 +3446,8 @@ struct skb_gso_cb {
 	int	encap_level;
 	__u16	csum_start;
 };
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+#define SKB_SGO_CB_OFFSET	32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
 
 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
 {
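A toy sketch of the idea behind SKB_SGO_CB_OFFSET (names and sizes are illustrative stand-ins, not kernel API): place the GSO control block in the upper part of the 48-byte skb->cb[] so protocol data kept in the lower 32 bytes survives segmentation, and prove the fit at compile time the way BUILD_BUG_ON does:

	#include <stdio.h>

	#define CB_SIZE		48	/* like sizeof(((struct sk_buff *)0)->cb) */
	#define GSO_CB_OFFSET	32

	struct gso_cb {			/* stand-in for struct skb_gso_cb */
		int mac_offset;
		int encap_level;
		unsigned short csum_start;
	};

	struct buf {
		char cb[CB_SIZE];
	};

	#define GSO_CB(b) ((struct gso_cb *)((b)->cb + GSO_CB_OFFSET))

	/* analog of BUILD_BUG_ON(SKB_SGO_CB_OFFSET + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)) */
	_Static_assert(GSO_CB_OFFSET + sizeof(struct gso_cb) <= CB_SIZE,
		       "gso cb must fit above the protocol cb area");

	int main(void)
	{
		struct buf b = { { 0 } };

		GSO_CB(&b)->mac_offset = 14;	/* lower 32 bytes stay untouched */
		printf("gso cb at offset %d, size %zu\n", GSO_CB_OFFSET,
		       sizeof(struct gso_cb));
		return 0;
	}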
@@ -511,6 +511,8 @@ struct usb3_lpm_parameters {
 * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
 * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
 * @usb3_lpm_enabled: USB3 hardware LPM enabled
+ * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled
+ * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled
 * @string_langid: language ID for strings
 * @product: iProduct string, if present (static)
 * @manufacturer: iManufacturer string, if present (static)
@@ -584,6 +586,8 @@ struct usb_device {
 	unsigned usb2_hw_lpm_enabled:1;
 	unsigned usb2_hw_lpm_allowed:1;
 	unsigned usb3_lpm_enabled:1;
+	unsigned usb3_lpm_u1_enabled:1;
+	unsigned usb3_lpm_u2_enabled:1;
 	int string_langid;
 
 	/* static strings from the device */
@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
 
 struct ipv6hdr;
 
-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
+/* Note:
+ * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
+ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
+ * In IPv6 case, no checksum compensates the change in IPv6 header,
+ * so we have to update skb->csum.
+ */
+static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
 {
+	__be32 from, to;
+
 	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
 		return 0;
-	*(__be32*)iph |= htonl(INET_ECN_CE << 20);
+
+	from = *(__be32 *)iph;
+	to = from | htonl(INET_ECN_CE << 20);
+	*(__be32 *)iph = to;
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_add(csum_sub(skb->csum, from), to);
 	return 1;
 }
 
@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 	case cpu_to_be16(ETH_P_IPV6):
 		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
 		    skb_tail_pointer(skb))
-			return IP6_ECN_set_ce(ipv6_hdr(skb));
+			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
 		break;
 	}
 
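A standalone sketch of the csum_add(csum_sub(...)) fix-up above. The helper names mirror the kernel's but are local reimplementations: CHECKSUM_COMPLETE carries a 1's-complement sum of the packet, so editing a 32-bit word in place means subtracting the old value and adding the new one (folded results are normalized, since 1's-complement zero has two encodings):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* fold a wide accumulator into 16 bits with end-around carry */
	static uint32_t fold(uint64_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint32_t)sum;
	}

	/* 1's-complement sum of big-endian 16-bit words (even length assumed) */
	static uint32_t csum_partial(const uint8_t *b, size_t len)
	{
		uint64_t sum = 0;

		for (size_t i = 0; i < len; i += 2)
			sum += (uint32_t)((b[i] << 8) | b[i + 1]);
		return fold(sum);
	}

	static uint32_t csum_add32(uint32_t sum, uint32_t v)
	{
		return fold((uint64_t)sum + (v >> 16) + (v & 0xffff));
	}

	static uint32_t csum_sub32(uint32_t sum, uint32_t v)
	{
		return csum_add32(sum, ~v);	/* subtract = add the complement */
	}

	#define NORM(s) ((s) == 0xffff ? 0 : (s))

	int main(void)
	{
		uint8_t pkt[40];
		uint32_t sum, from, to;

		for (size_t i = 0; i < sizeof(pkt); i++)
			pkt[i] = (uint8_t)(i * 7 + 1);
		sum = csum_partial(pkt, sizeof(pkt));

		/* first IPv6 word before/after CE: ECN sits at bits 21:20 */
		from = (uint32_t)pkt[0] << 24 | (uint32_t)pkt[1] << 16 |
		       (uint32_t)pkt[2] << 8 | pkt[3];
		to = from | 3u << 20;
		pkt[0] = to >> 24; pkt[1] = to >> 16; pkt[2] = to >> 8; pkt[3] = (uint8_t)to;

		/* the incremental fix-up, as in the patch */
		sum = csum_add32(csum_sub32(sum, from), to);
		assert(NORM(sum) == NORM(csum_partial(pkt, sizeof(pkt))));
		return 0;
	}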
@@ -1121,6 +1121,16 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 			return -EINVAL;
 		}
 
+		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
+
+			if (insn->imm < 0 || insn->imm >= size) {
+				verbose("invalid shift %d\n", insn->imm);
+				return -EINVAL;
+			}
+		}
+
 		/* pattern match 'bpf_add Rx, imm' instruction */
 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
 		    regs[insn->dst_reg].type == FRAME_PTR &&
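Why shift immediates must be bounded (the classic-BPF change in net/core/filter.c below enforces the same rule): in C, and across the architectures BPF programs are JITed for, a shift by at least the operand width is undefined, so different targets compute different results. A small sketch whose output is deliberately machine-dependent:

	#include <stdint.h>
	#include <stdio.h>

	/* volatile keeps the compiler from folding the shift at build time */
	static uint32_t shl32(uint32_t x, volatile unsigned n)
	{
		return x << n;	/* undefined behaviour for n >= 32 */
	}

	int main(void)
	{
		/* often prints 2 on x86 (shift count masked to n & 31), 0 elsewhere */
		printf("1 << 33 (32-bit) = %u\n", shl32(1, 33));
		printf("valid immediates: 0..31 for BPF_ALU, 0..63 for BPF_ALU64\n");
		return 0;
	}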
@@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /* finally deinitialize the claim */
-static void batadv_claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
-	struct batadv_bla_claim *claim;
-
-	claim = container_of(rcu, struct batadv_bla_claim, rcu);
-
 	batadv_backbone_gw_free_ref(claim->backbone_gw);
-	kfree(claim);
+	kfree_rcu(claim, rcu);
 }
 
 /* free a claim, call claim_free_rcu if its the last reference */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
 	if (atomic_dec_and_test(&claim->refcount))
-		call_rcu(&claim->rcu, batadv_claim_free_rcu);
+		batadv_claim_release(claim);
 }
 
 /**
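The batman-adv hunks in this merge all converge on the same refcount-then-release pattern. A userspace sketch of that pattern follows; kfree_rcu() is kernel-only, so plain free() stands in for the deferred reclamation, and C11 atomics stand in for atomic_dec_and_test():

	#include <stdatomic.h>
	#include <stdlib.h>

	struct claim {
		atomic_int refcount;
		/* ... payload ... */
	};

	static struct claim *claim_get(struct claim *c)
	{
		atomic_fetch_add(&c->refcount, 1);
		return c;
	}

	static void claim_release(struct claim *c)
	{
		free(c);	/* kernel code would kfree_rcu(c, rcu) here */
	}

	static void claim_free_ref(struct claim *c)
	{
		/* like atomic_dec_and_test(): true only for the final reference */
		if (atomic_fetch_sub(&c->refcount, 1) == 1)
			claim_release(c);
	}

	int main(void)
	{
		struct claim *c = calloc(1, sizeof(*c));

		atomic_init(&c->refcount, 1);
		claim_get(c);		/* second reference */
		claim_free_ref(c);	/* not last: object survives */
		claim_free_ref(c);	/* last reference: released exactly once */
		return 0;
	}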
@@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
 		call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
 }
 
-/**
- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
- * possibly free it (without rcu callback)
- * @hard_iface: the hard interface to free
- */
-static inline void
-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
-{
-	if (atomic_dec_and_test(&hard_iface->refcount))
-		batadv_hardif_free_rcu(&hard_iface->rcu);
-}
-
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
@@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
- * its refcount on the orig_node
- * @rcu: rcu pointer of the nc node
+ * batadv_nc_node_release - release nc_node from lists and queue for free after
+ * rcu grace period
+ * @nc_node: the nc node to free
 */
-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
+static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
 {
-	struct batadv_nc_node *nc_node;
-
-	nc_node = container_of(rcu, struct batadv_nc_node, rcu);
 	batadv_orig_node_free_ref(nc_node->orig_node);
-	kfree(nc_node);
+	kfree_rcu(nc_node, rcu);
 }
 
 /**
- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
- * frees it
+ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
+ * release it
 * @nc_node: the nc node to free
 */
 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
 {
 	if (atomic_dec_and_test(&nc_node->refcount))
-		call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
+		batadv_nc_node_release(nc_node);
 }
 
 /**
@@ -163,92 +163,66 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
- * @rcu: rcu pointer of the neigh_ifinfo object
- */
-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
-{
-	struct batadv_neigh_ifinfo *neigh_ifinfo;
-
-	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
-
-	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
-
-	kfree(neigh_ifinfo);
-}
-
-/**
- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
- * the neigh_ifinfo (without rcu callback)
+ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * free after rcu grace period
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
 static void
-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
+batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
-	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
+	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
+
+	kfree_rcu(neigh_ifinfo, rcu);
 }
 
 /**
- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
 * the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
+		batadv_neigh_ifinfo_release(neigh_ifinfo);
 }
 
 /**
- * batadv_neigh_node_free_rcu - free the neigh_node
- * @rcu: rcu pointer of the neigh_node
+ * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * free after rcu grace period
 * @neigh_node: neigh neighbor to free
 */
-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
+static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
 {
 	struct hlist_node *node_tmp;
-	struct batadv_neigh_node *neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
 	struct batadv_algo_ops *bao;
 
-	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
 	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
 
 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
 				  &neigh_node->ifinfo_list, list) {
-		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
+		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 	}
 
 	if (bao->bat_neigh_free)
 		bao->bat_neigh_free(neigh_node);
 
-	batadv_hardif_free_ref_now(neigh_node->if_incoming);
+	batadv_hardif_free_ref(neigh_node->if_incoming);
 
-	kfree(neigh_node);
-}
-
-/**
- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
- * and possibly free it (without rcu callback)
- * @neigh_node: neigh neighbor to free
- */
-static void
-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
-{
-	if (atomic_dec_and_test(&neigh_node->refcount))
-		batadv_neigh_node_free_rcu(&neigh_node->rcu);
+	kfree_rcu(neigh_node, rcu);
 }
 
 /**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
- * and possibly free it
+ * and possibly release it
 * @neigh_node: neigh neighbor to free
 */
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
 {
 	if (atomic_dec_and_test(&neigh_node->refcount))
-		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
+		batadv_neigh_node_release(neigh_node);
 }
 
 /**
@@ -532,79 +506,48 @@ out:
 }
 
 /**
- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
- * @rcu: rcu pointer of the orig_ifinfo object
+ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
 {
-	struct batadv_orig_ifinfo *orig_ifinfo;
 	struct batadv_neigh_node *router;
 
-	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
-
 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
 
 	/* this is the last reference to this object */
 	router = rcu_dereference_protected(orig_ifinfo->router, true);
 	if (router)
-		batadv_neigh_node_free_ref_now(router);
-	kfree(orig_ifinfo);
-}
-
-/**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- * the orig_ifinfo (without rcu callback)
- * @orig_ifinfo: the orig_ifinfo object to release
- */
-static void
-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
-{
-	if (atomic_dec_and_test(&orig_ifinfo->refcount))
-		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
-}
-
-/**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+		batadv_neigh_node_free_ref(router);
+
+	kfree_rcu(orig_ifinfo, rcu);
+}
+
+/**
+ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 * the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
 {
 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
-		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
+		batadv_orig_ifinfo_release(orig_ifinfo);
 }
 
 /**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-	struct hlist_node *node_tmp;
-	struct batadv_neigh_node *neigh_node;
 	struct batadv_orig_node *orig_node;
-	struct batadv_orig_ifinfo *orig_ifinfo;
 
 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
 
-	spin_lock_bh(&orig_node->neigh_list_lock);
-
-	/* for all neighbors towards this originator ... */
-	hlist_for_each_entry_safe(neigh_node, node_tmp,
-				  &orig_node->neigh_list, list) {
-		hlist_del_rcu(&neigh_node->list);
-		batadv_neigh_node_free_ref_now(neigh_node);
-	}
-
-	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
-				  &orig_node->ifinfo_list, list) {
-		hlist_del_rcu(&orig_ifinfo->list);
-		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
-	}
-	spin_unlock_bh(&orig_node->neigh_list_lock);
-
 	batadv_mcast_purge_orig(orig_node);
 
-	/* Free nc_nodes */
-	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
-
 	batadv_frag_purge_orig(orig_node, NULL);
 
 	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
@@ -614,26 +557,48 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 	kfree(orig_node);
 }
 
+/**
+ * batadv_orig_node_release - release orig_node from lists and queue for
+ * free after rcu grace period
+ * @orig_node: the orig node to free
+ */
+static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+{
+	struct hlist_node *node_tmp;
+	struct batadv_neigh_node *neigh_node;
+	struct batadv_orig_ifinfo *orig_ifinfo;
+
+	spin_lock_bh(&orig_node->neigh_list_lock);
+
+	/* for all neighbors towards this originator ... */
+	hlist_for_each_entry_safe(neigh_node, node_tmp,
+				  &orig_node->neigh_list, list) {
+		hlist_del_rcu(&neigh_node->list);
+		batadv_neigh_node_free_ref(neigh_node);
+	}
+
+	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+				  &orig_node->ifinfo_list, list) {
+		hlist_del_rcu(&orig_ifinfo->list);
+		batadv_orig_ifinfo_free_ref(orig_ifinfo);
+	}
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+
+	/* Free nc_nodes */
+	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+
+	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+}
+
 /**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
- * schedule an rcu callback for freeing it
+ * release it
 * @orig_node: the orig node to free
 */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
 	if (atomic_dec_and_test(&orig_node->refcount))
-		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
-}
-
-/**
- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
- * possibly free it (without rcu callback)
- * @orig_node: the orig node to free
- */
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
-{
-	if (atomic_dec_and_test(&orig_node->refcount))
-		batadv_orig_node_free_rcu(&orig_node->rcu);
+		batadv_orig_node_release(orig_node);
 }
 
 void batadv_originator_free(struct batadv_priv *bat_priv)
@@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 					      const u8 *addr);
 struct batadv_neigh_node *
@@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
 	return count;
 }
 
-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
-	struct batadv_tt_orig_list_entry *orig_entry;
-
-	orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
-	/* We are in an rcu callback here, therefore we cannot use
-	 * batadv_orig_node_free_ref() and its call_rcu():
-	 * An rcu_barrier() wouldn't wait for that to finish
-	 */
-	batadv_orig_node_free_ref_now(orig_entry->orig_node);
-	kfree(orig_entry);
-}
-
 /**
 * batadv_tt_local_size_mod - change the size by v of the local table identified
 * by vid
@@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
 	batadv_tt_global_size_mod(orig_node, vid, -1);
 }
 
+/**
+ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * queue for free after rcu grace period
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
+{
+	batadv_orig_node_free_ref(orig_entry->orig_node);
+	kfree_rcu(orig_entry, rcu);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
 	if (!atomic_dec_and_test(&orig_entry->refcount))
 		return;
 
-	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+	batadv_tt_orig_list_entry_release(orig_entry);
 }
 
 /**
@@ -28,6 +28,8 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
+static struct lock_class_key bridge_netdev_addr_lock_key;
+
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -87,6 +89,11 @@ out:
 	return NETDEV_TX_OK;
 }
 
+static void br_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+}
+
 static int br_dev_init(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
 	err = br_vlan_init(br);
 	if (err)
 		free_percpu(br->stats);
+	br_set_lockdep_class(dev);
 
 	return err;
 }
@@ -2542,6 +2542,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
+ *
+ *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 				  netdev_features_t features, bool tx_path)
@@ -2556,6 +2558,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 			return ERR_PTR(err);
 	}
 
+	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
 	SKB_GSO_CB(skb)->encap_level = 0;
 
@@ -777,6 +777,11 @@ static int bpf_check_classic(const struct sock_filter *filter,
 			if (ftest->k == 0)
 				return -EINVAL;
 			break;
+		case BPF_ALU | BPF_LSH | BPF_K:
+		case BPF_ALU | BPF_RSH | BPF_K:
+			if (ftest->k >= 32)
+				return -EINVAL;
+			break;
 		case BPF_LD | BPF_MEM:
 		case BPF_LDX | BPF_MEM:
 		case BPF_ST:
@@ -2787,7 +2787,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
 	} else {
 		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+	if (likely(skb))
+		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
 	return skb;
 }
@@ -240,6 +240,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 	 * from host network stack.
 	 */
 	features = netif_skb_features(skb);
+	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 	if (IS_ERR_OR_NULL(segs)) {
 		kfree_skb(skb);
@@ -921,7 +922,7 @@ static int __ip_append_data(struct sock *sk,
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM)) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
 	yeah->fast_count = 0;
 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
 
-	return tp->snd_cwnd - reduction;
+	return max_t(int, tp->snd_cwnd - reduction, 2);
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
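A sketch of the failure mode this one-liner fixes: snd_cwnd and the computed reduction are unsigned, so "cwnd - reduction" wraps to a huge value whenever reduction >= cwnd, and ssthresh would explode instead of shrinking. Clamping through a signed comparison, as max_t(int, ..., 2) does, floors the result at 2:

	#include <stdint.h>
	#include <stdio.h>

	#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		uint32_t snd_cwnd = 4, reduction = 6;	/* illustrative values */

		/* unsigned wrap-around: prints 4294967294 */
		printf("unclamped: %u\n", snd_cwnd - reduction);
		/* the patched computation: prints 2 */
		printf("clamped:   %u\n", max_t(int, snd_cwnd - reduction, 2));
		return 0;
	}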
@@ -1353,7 +1353,7 @@ emsgsize:
 	    (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) &&
-	    (sk->sk_type == SOCK_DGRAM)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen,
 					  transhdrlen, mtu, flags, fl6);
@@ -462,8 +462,10 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
+		rcu_read_lock();
 		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 			       np->tclass);
+		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
 
@@ -23,7 +23,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
 	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
-		IP6_ECN_set_ce(inner_iph);
+		IP6_ECN_set_ce(skb, inner_iph);
 }
 
 /* Add encapsulation header.
@@ -336,12 +336,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
 	struct sw_flow_key later_key;
 	struct sk_buff *segs, *nskb;
-	struct ovs_skb_cb ovs_cb;
 	int err;
 
-	ovs_cb = *OVS_CB(skb);
+	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
-	*OVS_CB(skb) = ovs_cb;
 	if (IS_ERR(segs))
 		return PTR_ERR(segs);
 	if (segs == NULL)
@@ -359,7 +357,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 	/* Queue all of the segments. */
 	skb = segs;
 	do {
-		*OVS_CB(skb) = ovs_cb;
 		if (gso_type & SKB_GSO_UDP && skb != segs)
 			key = &later_key;
 
@@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct sockaddr_pn sa;
 	u16 len;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_RX_DROP;
+
 	/* check we have at least a full Phonet header */
 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
 		goto out;
net/sched/cls_flower.c
@@ -252,23 +252,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
    fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
                   mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
                   sizeof(key->eth.src));
+
    fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
                   &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
                   sizeof(key->basic.n_proto));
+
    if (key->basic.n_proto == htons(ETH_P_IP) ||
        key->basic.n_proto == htons(ETH_P_IPV6)) {
        fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
                       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
                       sizeof(key->basic.ip_proto));
    }
-   if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+
+   if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
+       key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
                       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                       sizeof(key->ipv4.src));
        fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
                       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
                       sizeof(key->ipv4.dst));
-   } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+   } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
+       key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
                       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                       sizeof(key->ipv6.src));
@@ -276,6 +281,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
                       sizeof(key->ipv6.dst));
    }
+
    if (key->basic.ip_proto == IPPROTO_TCP) {
        fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
                       &mask->tp.src, TCA_FLOWER_UNSPEC,
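Note: the cls_flower change keys the address type off which netlink attributes the user actually supplied, instead of reading an addr_type field that may never have been set. A minimal userspace sketch of that idea follows; enum and function names are illustrative, not the kernel API.

    /* Derive the address family from attribute presence. */
    #include <stdio.h>

    enum { ATTR_IPV4_SRC, ATTR_IPV4_DST, ATTR_IPV6_SRC, ATTR_IPV6_DST, ATTR_MAX };
    enum { ADDR_NONE, ADDR_IPV4, ADDR_IPV6 };

    static int classify_addr_type(const void *tb[ATTR_MAX])
    {
        if (tb[ATTR_IPV4_SRC] || tb[ATTR_IPV4_DST])
            return ADDR_IPV4;    /* set the type only when v4 attrs exist */
        if (tb[ATTR_IPV6_SRC] || tb[ATTR_IPV6_DST])
            return ADDR_IPV6;    /* likewise for v6 */
        return ADDR_NONE;        /* no address keys supplied */
    }

    int main(void)
    {
        int dummy = 1;
        const void *tb[ATTR_MAX] = { [ATTR_IPV6_DST] = &dummy };

        printf("addr_type = %d\n", classify_addr_type(tb)); /* prints 2 */
        return 0;
    }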
net/sctp/sysctl.c
@@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
    struct ctl_table tbl;
    bool changed = false;
    char *none = "none";
-   char tmp[8];
+   char tmp[8] = {0};
    int ret;

    memset(&tbl, 0, sizeof(struct ctl_table));
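Note: the one-character sctp fix zero-fills the stack buffer so later string handling cannot read stale stack bytes when fewer than eight characters are written into it. A tiny standalone illustration:

    /* Why "char tmp[8] = {0};" matters: a partially filled,
     * uninitialized stack buffer is not guaranteed NUL-terminated. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char tmp[8] = {0};           /* every byte starts as '\0' */

        memcpy(tmp, "md5", 3);       /* copy without a terminator... */
        printf("%s\n", tmp);         /* ...still safe: prints "md5" */
        return 0;
    }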
net/unix/af_unix.c
@@ -1513,6 +1513,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
    sock_wfree(skb);
 }

+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+   struct user_struct *user = current_user();
+
+   if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+       return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+   return false;
+}
+
 #define MAX_RECURSION_LEVEL 4

 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
@@ -1521,6 +1536,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
    unsigned char max_level = 0;
    int unix_sock_count = 0;

+   if (too_many_unix_fds(current))
+       return -ETOOMANYREFS;
+
    for (i = scm->fp->count - 1; i >= 0; i--) {
        struct sock *sk = unix_get_socket(scm->fp->fp[i]);

@@ -1542,10 +1560,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
    if (!UNIXCB(skb).fp)
        return -ENOMEM;

-   if (unix_sock_count) {
-       for (i = scm->fp->count - 1; i >= 0; i--)
-           unix_inflight(scm->fp->fp[i]);
-   }
+   for (i = scm->fp->count - 1; i >= 0; i--)
+       unix_inflight(scm->fp->fp[i]);
    return max_level;
 }

net/unix/garbage.c
@@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
 {
    struct sock *s = unix_get_socket(fp);

+   spin_lock(&unix_gc_lock);
+
    if (s) {
        struct unix_sock *u = unix_sk(s);

-       spin_lock(&unix_gc_lock);
-
        if (atomic_long_inc_return(&u->inflight) == 1) {
            BUG_ON(!list_empty(&u->link));
            list_add_tail(&u->link, &gc_inflight_list);
@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
            BUG_ON(list_empty(&u->link));
        }
        unix_tot_inflight++;
-       spin_unlock(&unix_gc_lock);
    }
+   fp->f_cred->user->unix_inflight++;
+   spin_unlock(&unix_gc_lock);
 }

 void unix_notinflight(struct file *fp)
 {
    struct sock *s = unix_get_socket(fp);

+   spin_lock(&unix_gc_lock);
+
    if (s) {
        struct unix_sock *u = unix_sk(s);

-       spin_lock(&unix_gc_lock);
        BUG_ON(list_empty(&u->link));

        if (atomic_long_dec_and_test(&u->inflight))
            list_del_init(&u->link);
        unix_tot_inflight--;
-       spin_unlock(&unix_gc_lock);
    }
+   fp->f_cred->user->unix_inflight--;
+   spin_unlock(&unix_gc_lock);
 }

 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
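Note: together, the af_unix.c and garbage.c hunks above make each user account for every file descriptor they currently have "in flight" over AF_UNIX sockets, and refuse new attachments once the count exceeds the sender's open-file rlimit. A minimal userspace model of the accounting idea, assuming a single user and no capability escape hatch (the real code keeps the counter in struct user_struct under the gc lock):

    #include <stdio.h>
    #include <stdbool.h>
    #include <sys/resource.h>

    static unsigned long user_unix_inflight;    /* per-user counter */

    static bool too_many_unix_fds(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
            return true;            /* be conservative on error */
        return user_unix_inflight > rl.rlim_cur;
    }

    static int attach_fds(unsigned int nfds)
    {
        if (too_many_unix_fds())
            return -1;              /* kernel returns -ETOOMANYREFS */
        user_unix_inflight += nfds; /* kernel: fp->f_cred->user->unix_inflight++ */
        return 0;
    }

    int main(void)
    {
        printf("attach: %d (inflight=%lu)\n", attach_fds(4), user_unix_inflight);
        return 0;
    }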
net/xfrm/xfrm_output.c
@@ -167,6 +167,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
 {
    struct sk_buff *segs;

+   BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+   BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
    segs = skb_gso_segment(skb, 0);
    kfree_skb(skb);
    if (IS_ERR(segs))
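Note: the BUILD_BUG_ON() lines here (and in the openvswitch hunk earlier) are compile-time assertions: the IP control block must fit below SKB_SGO_CB_OFFSET so the GSO code can save and restore it around segmentation. C11 offers the same guarantee via static_assert; the sizes below are made-up stand-ins for illustration.

    #include <assert.h>
    #include <stdio.h>

    #define CB_OFFSET 32                         /* stand-in for SKB_SGO_CB_OFFSET */
    struct ipcb { unsigned char opts[24]; };     /* stand-in for struct inet_skb_parm */

    static_assert(sizeof(struct ipcb) <= CB_OFFSET,
                  "control block would be clobbered during GSO");

    int main(void)
    {
        printf("sizeof(struct ipcb) = %zu, limit = %d\n",
               sizeof(struct ipcb), CB_OFFSET);
        return 0;
    }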
scripts/recordmcount.pl
@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {

 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    # See comment in the sparc64 section for why we use '\w'.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";

     if ($bits == 64) {
security/keys/process_keys.c
@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
            ret = PTR_ERR(keyring);
            goto error2;
        } else if (keyring == new->session_keyring) {
+           key_put(keyring);
            ret = 0;
            goto error2;
        }
sound/core/control.c
@@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
        return -EFAULT;
    if (tlv.length < sizeof(unsigned int) * 2)
        return -EINVAL;
+   if (!tlv.numid)
+       return -EINVAL;
    down_read(&card->controls_rwsem);
    kctl = snd_ctl_find_numid(card, tlv.numid);
    if (kctl == NULL) {
sound/core/hrtimer.c
@@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
    struct snd_hrtimer *stime = t->private_data;

    atomic_set(&stime->running, 0);
-   hrtimer_cancel(&stime->hrt);
+   hrtimer_try_to_cancel(&stime->hrt);
    hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
                  HRTIMER_MODE_REL);
    atomic_set(&stime->running, 1);
@@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
 {
    struct snd_hrtimer *stime = t->private_data;
    atomic_set(&stime->running, 0);
+   hrtimer_try_to_cancel(&stime->hrt);
    return 0;
 }

sound/core/pcm_compat.c
@@ -255,10 +255,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
    if (! (runtime = substream->runtime))
        return -ENOTTY;

-   /* only fifo_size is different, so just copy all */
-   data = memdup_user(data32, sizeof(*data32));
-   if (IS_ERR(data))
-       return PTR_ERR(data);
+   data = kmalloc(sizeof(*data), GFP_KERNEL);
+   if (!data)
+       return -ENOMEM;
+
+   /* only fifo_size (RO from userspace) is different, so just copy all */
+   if (copy_from_user(data, data32, sizeof(*data32))) {
+       err = -EFAULT;
+       goto error;
+   }

    if (refine)
        err = snd_pcm_hw_refine(substream, data);
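Note: the pcm_compat fix addresses a size mismatch: the native struct is larger than the 32-bit user layout, so duplicating only sizeof(*data32) bytes left the allocation short for code that works on the native struct. The fix allocates the full native size and copies only the compat-sized prefix. A userspace sketch of the pattern, with made-up stand-in structs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct params32 { unsigned int flags; unsigned int masks[4]; };
    struct params   { unsigned int flags; unsigned int masks[4];
                      unsigned long fifo_size; };

    int main(void)
    {
        struct params32 user = { 7, {0} };
        struct params *data = malloc(sizeof(*data));  /* full native size */

        if (!data)
            return 1;
        memcpy(data, &user, sizeof(user));  /* copy only the compat prefix */
        data->fifo_size = 0;                /* native-only tail set explicitly */
        printf("flags=%u fifo_size=%lu\n", data->flags, data->fifo_size);
        free(data);
        return 0;
    }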
sound/core/seq/seq_clientmgr.c
@@ -1962,7 +1962,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
         * No restrictions so for a user client we can clear
         * the whole fifo
         */
-       if (client->type == USER_CLIENT)
+       if (client->type == USER_CLIENT && client->data.user.fifo)
            snd_seq_fifo_clear(client->data.user.fifo);
    }

sound/core/seq/seq_compat.c
@@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
    struct snd_seq_port_info *data;
    mm_segment_t fs;

-   data = memdup_user(data32, sizeof(*data32));
-   if (IS_ERR(data))
-       return PTR_ERR(data);
+   data = kmalloc(sizeof(*data), GFP_KERNEL);
+   if (!data)
+       return -ENOMEM;

-   if (get_user(data->flags, &data32->flags) ||
+   if (copy_from_user(data, data32, sizeof(*data32)) ||
+       get_user(data->flags, &data32->flags) ||
        get_user(data->time_queue, &data32->time_queue))
        goto error;
    data->kernel = NULL;
sound/core/seq/seq_queue.c
@@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
 static void queue_delete(struct snd_seq_queue *q)
 {
    /* stop and release the timer */
+   mutex_lock(&q->timer_mutex);
    snd_seq_timer_stop(q->timer);
    snd_seq_timer_close(q);
+   mutex_unlock(&q->timer_mutex);
    /* wait until access free */
    snd_use_lock_sync(&q->use_lock);
    /* release resources... */
sound/core/timer.c
@@ -65,6 +65,7 @@ struct snd_timer_user {
    int qtail;
    int qused;
    int queue_size;
+   bool disconnected;
    struct snd_timer_read *queue;
    struct snd_timer_tread *tqueue;
    spinlock_t qlock;
@@ -73,7 +74,7 @@ struct snd_timer_user {
    struct timespec tstamp;     /* trigger tstamp */
    wait_queue_head_t qchange_sleep;
    struct fasync_struct *fasync;
-   struct mutex tread_sem;
+   struct mutex ioctl_lock;
 };

 /* list of timers */
@@ -215,11 +216,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
            slave->slave_id == master->slave_id) {
            list_move_tail(&slave->open_list, &master->slave_list_head);
            spin_lock_irq(&slave_active_lock);
+           spin_lock(&master->timer->lock);
            slave->master = master;
            slave->timer = master->timer;
            if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
                list_add_tail(&slave->active_list,
                              &master->slave_active_head);
+           spin_unlock(&master->timer->lock);
            spin_unlock_irq(&slave_active_lock);
        }
    }
@@ -288,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
        mutex_unlock(&register_mutex);
        return -ENOMEM;
    }
+   /* take a card refcount for safe disconnection */
+   if (timer->card)
+       get_device(&timer->card->card_dev);
    timeri->slave_class = tid->dev_sclass;
    timeri->slave_id = slave_id;
    if (list_empty(&timer->open_list_head) && timer->hw.open)
@@ -346,15 +352,21 @@ int snd_timer_close(struct snd_timer_instance *timeri)
            timer->hw.close)
            timer->hw.close(timer);
        /* remove slave links */
+       spin_lock_irq(&slave_active_lock);
+       spin_lock(&timer->lock);
        list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
                                 open_list) {
-           spin_lock_irq(&slave_active_lock);
-           _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
            list_move_tail(&slave->open_list, &snd_timer_slave_list);
            slave->master = NULL;
            slave->timer = NULL;
-           spin_unlock_irq(&slave_active_lock);
+           list_del_init(&slave->ack_list);
+           list_del_init(&slave->active_list);
        }
+       spin_unlock(&timer->lock);
+       spin_unlock_irq(&slave_active_lock);
+       /* release a card refcount for safe disconnection */
+       if (timer->card)
+           put_device(&timer->card->card_dev);
        mutex_unlock(&register_mutex);
    }
  out:
@@ -441,9 +453,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)

    spin_lock_irqsave(&slave_active_lock, flags);
    timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
-   if (timeri->master)
+   if (timeri->master && timeri->timer) {
+       spin_lock(&timeri->timer->lock);
        list_add_tail(&timeri->active_list,
                      &timeri->master->slave_active_head);
+       spin_unlock(&timeri->timer->lock);
+   }
    spin_unlock_irqrestore(&slave_active_lock, flags);
    return 1; /* delayed start */
 }
@@ -467,6 +482,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
    timer = timeri->timer;
    if (timer == NULL)
        return -EINVAL;
+   if (timer->card && timer->card->shutdown)
+       return -ENODEV;
    spin_lock_irqsave(&timer->lock, flags);
    timeri->ticks = timeri->cticks = ticks;
    timeri->pticks = 0;
@@ -489,6 +506,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
    if (!keep_flag) {
        spin_lock_irqsave(&slave_active_lock, flags);
        timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+       list_del_init(&timeri->ack_list);
+       list_del_init(&timeri->active_list);
        spin_unlock_irqrestore(&slave_active_lock, flags);
    }
    goto __end;
@@ -499,6 +518,10 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
    spin_lock_irqsave(&timer->lock, flags);
    list_del_init(&timeri->ack_list);
    list_del_init(&timeri->active_list);
+   if (timer->card && timer->card->shutdown) {
+       spin_unlock_irqrestore(&timer->lock, flags);
+       return 0;
+   }
    if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
        !(--timer->running)) {
        timer->hw.stop(timer);
@@ -561,6 +584,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
    timer = timeri->timer;
    if (! timer)
        return -EINVAL;
+   if (timer->card && timer->card->shutdown)
+       return -ENODEV;
    spin_lock_irqsave(&timer->lock, flags);
    if (!timeri->cticks)
        timeri->cticks = 1;
@@ -624,6 +649,9 @@ static void snd_timer_tasklet(unsigned long arg)
    unsigned long resolution, ticks;
    unsigned long flags;

+   if (timer->card && timer->card->shutdown)
+       return;
+
    spin_lock_irqsave(&timer->lock, flags);
    /* now process all callbacks */
    while (!list_empty(&timer->sack_list_head)) {
@@ -664,6 +692,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
    if (timer == NULL)
        return;

+   if (timer->card && timer->card->shutdown)
+       return;
+
    spin_lock_irqsave(&timer->lock, flags);

    /* remember the current resolution */
@@ -694,7 +725,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
        } else {
            ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
            if (--timer->running)
-               list_del(&ti->active_list);
+               list_del_init(&ti->active_list);
        }
        if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
            (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -874,11 +905,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
    return 0;
 }

+/* just for reference in snd_timer_dev_disconnect() below */
+static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+                                    int event, struct timespec *tstamp,
+                                    unsigned long resolution);
+
 static int snd_timer_dev_disconnect(struct snd_device *device)
 {
    struct snd_timer *timer = device->device_data;
+   struct snd_timer_instance *ti;
+
    mutex_lock(&register_mutex);
    list_del_init(&timer->device_list);
+   /* wake up pending sleepers */
+   list_for_each_entry(ti, &timer->open_list_head, open_list) {
+       /* FIXME: better to have a ti.disconnect() op */
+       if (ti->ccallback == snd_timer_user_ccallback) {
+           struct snd_timer_user *tu = ti->callback_data;
+
+           tu->disconnected = true;
+           wake_up(&tu->qchange_sleep);
+       }
+   }
    mutex_unlock(&register_mutex);
    return 0;
 }
@@ -889,6 +937,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
    unsigned long resolution = 0;
    struct snd_timer_instance *ti, *ts;

+   if (timer->card && timer->card->shutdown)
+       return;
    if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
        return;
    if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
@@ -1047,6 +1097,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,

    mutex_lock(&register_mutex);
    list_for_each_entry(timer, &snd_timer_list, device_list) {
+       if (timer->card && timer->card->shutdown)
+           continue;
        switch (timer->tmr_class) {
        case SNDRV_TIMER_CLASS_GLOBAL:
            snd_iprintf(buffer, "G%i: ", timer->tmr_device);
@@ -1253,7 +1305,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
        return -ENOMEM;
    spin_lock_init(&tu->qlock);
    init_waitqueue_head(&tu->qchange_sleep);
-   mutex_init(&tu->tread_sem);
+   mutex_init(&tu->ioctl_lock);
    tu->ticks = 1;
    tu->queue_size = 128;
    tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
@@ -1273,8 +1325,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
    if (file->private_data) {
        tu = file->private_data;
        file->private_data = NULL;
+       mutex_lock(&tu->ioctl_lock);
        if (tu->timeri)
            snd_timer_close(tu->timeri);
+       mutex_unlock(&tu->ioctl_lock);
        kfree(tu->queue);
        kfree(tu->tqueue);
        kfree(tu);
@@ -1512,7 +1566,6 @@ static int snd_timer_user_tselect(struct file *file,
    int err = 0;

    tu = file->private_data;
-   mutex_lock(&tu->tread_sem);
    if (tu->timeri) {
        snd_timer_close(tu->timeri);
        tu->timeri = NULL;
@@ -1556,7 +1609,6 @@ static int snd_timer_user_tselect(struct file *file,
    }

 __err:
-   mutex_unlock(&tu->tread_sem);
    return err;
 }

@@ -1769,7 +1821,7 @@ enum {
    SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
 };

-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
                 unsigned long arg)
 {
    struct snd_timer_user *tu;
@@ -1786,17 +1838,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
    {
        int xarg;

-       mutex_lock(&tu->tread_sem);
-       if (tu->timeri) {   /* too late */
-           mutex_unlock(&tu->tread_sem);
+       if (tu->timeri)     /* too late */
            return -EBUSY;
-       }
-       if (get_user(xarg, p)) {
-           mutex_unlock(&tu->tread_sem);
+       if (get_user(xarg, p))
            return -EFAULT;
-       }
        tu->tread = xarg ? 1 : 0;
-       mutex_unlock(&tu->tread_sem);
        return 0;
    }
    case SNDRV_TIMER_IOCTL_GINFO:
@@ -1829,6 +1875,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
    return -ENOTTY;
 }

+static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+                                unsigned long arg)
+{
+   struct snd_timer_user *tu = file->private_data;
+   long ret;
+
+   mutex_lock(&tu->ioctl_lock);
+   ret = __snd_timer_user_ioctl(file, cmd, arg);
+   mutex_unlock(&tu->ioctl_lock);
+   return ret;
+}
+
 static int snd_timer_user_fasync(int fd, struct file * file, int on)
 {
    struct snd_timer_user *tu;
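Note: the timer ioctl hunks above replace fine-grained, branch-by-branch locking with a simple pattern: rename the real handler with a leading underscore and add a thin wrapper that takes one per-open-file mutex, so every ioctl on the same file handle is serialized and no unlock can be forgotten on an error path. A userspace sketch of that wrapper pattern (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct timer_user {
        pthread_mutex_t ioctl_lock;
        int tread;              /* state the racing ioctls fought over */
    };

    static long __timer_ioctl(struct timer_user *tu, unsigned int cmd, long arg)
    {
        if (cmd == 1)
            tu->tread = arg ? 1 : 0;  /* no per-branch unlocking needed now */
        return 0;
    }

    static long timer_ioctl(struct timer_user *tu, unsigned int cmd, long arg)
    {
        long ret;

        pthread_mutex_lock(&tu->ioctl_lock);
        ret = __timer_ioctl(tu, cmd, arg);
        pthread_mutex_unlock(&tu->ioctl_lock);
        return ret;
    }

    int main(void)
    {
        struct timer_user tu = { PTHREAD_MUTEX_INITIALIZER, 0 };

        timer_ioctl(&tu, 1, 1);
        printf("tread=%d\n", tu.tread);
        return 0;
    }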
@@ -1866,6 +1924,10 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,

            remove_wait_queue(&tu->qchange_sleep, &wait);

+           if (tu->disconnected) {
+               err = -ENODEV;
+               break;
+           }
            if (signal_pending(current)) {
                err = -ERESTARTSYS;
                break;
@@ -1915,6 +1977,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
    mask = 0;
    if (tu->qused)
        mask |= POLLIN | POLLRDNORM;
+   if (tu->disconnected)
+       mask |= POLLERR;

    return mask;
 }
sound/pci/hda/hda_bind.c
@@ -174,14 +174,40 @@ static inline bool codec_probed(struct hda_codec *codec)
    return device_attach(hda_codec_dev(codec)) > 0 && codec->preset;
 }

+/* try to auto-load codec module */
+static void request_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+   char modalias[32];
+   const char *mod = NULL;
+
+   switch (codec->probe_id) {
+   case HDA_CODEC_ID_GENERIC_HDMI:
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
+       mod = "snd-hda-codec-hdmi";
+#endif
+       break;
+   case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+       mod = "snd-hda-codec-generic";
+#endif
+       break;
+   default:
+       snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
+       mod = modalias;
+       break;
+   }
+
+   if (mod)
+       request_module(mod);
+#endif /* MODULE */
+}
+
 /* try to auto-load and bind the codec module */
 static void codec_bind_module(struct hda_codec *codec)
 {
 #ifdef MODULE
-   char modalias[32];
-
-   snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
-   request_module(modalias);
+   request_codec_module(codec);
    if (codec_probed(codec))
        return;
 #endif
@@ -218,17 +244,13 @@ static int codec_bind_generic(struct hda_codec *codec)

    if (is_likely_hdmi_codec(codec)) {
        codec->probe_id = HDA_CODEC_ID_GENERIC_HDMI;
-#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
-       request_module("snd-hda-codec-hdmi");
-#endif
+       request_codec_module(codec);
        if (codec_probed(codec))
            return 0;
    }

    codec->probe_id = HDA_CODEC_ID_GENERIC;
-#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
-   request_module("snd-hda-codec-generic");
-#endif
+   request_codec_module(codec);
    if (codec_probed(codec))
        return 0;
    return -ENODEV;
sound/pci/hda/hda_intel.c
@@ -2126,9 +2126,17 @@ i915_power_fail:
 static void azx_remove(struct pci_dev *pci)
 {
    struct snd_card *card = pci_get_drvdata(pci);
+   struct azx *chip;
+   struct hda_intel *hda;
+
+   if (card) {
+       /* flush the pending probing work */
+       chip = card->private_data;
+       hda = container_of(chip, struct hda_intel, chip);
+       flush_work(&hda->probe_work);

-   if (card)
        snd_card_free(card);
+   }
 }

 static void azx_shutdown(struct pci_dev *pci)
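Note: azx_remove() now waits for the deferred probe work item before freeing the card; otherwise the worker could still be touching the card during teardown. A userspace model of "flush pending work before free", with pthread_join playing the role of flush_work():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct card { pthread_t probe_thread; int probed; };

    static void *probe_work(void *p)
    {
        ((struct card *)p)->probed = 1;  /* deferred init touching the card */
        return NULL;
    }

    int main(void)
    {
        struct card *card = calloc(1, sizeof(*card));

        if (!card)
            return 1;
        pthread_create(&card->probe_thread, NULL, probe_work, card);
        /* remove path: flush pending work *before* freeing */
        pthread_join(card->probe_thread, NULL);
        printf("probed=%d, safe to free\n", card->probed);
        free(card);
        return 0;
    }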
sound/pci/hda/patch_realtek.c
@@ -4666,6 +4666,7 @@ enum {
    ALC290_FIXUP_SUBWOOFER,
    ALC290_FIXUP_SUBWOOFER_HSJACK,
    ALC269_FIXUP_THINKPAD_ACPI,
+   ALC269_FIXUP_DMIC_THINKPAD_ACPI,
    ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
    ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
    ALC255_FIXUP_HEADSET_MODE,
@@ -5103,6 +5104,12 @@ static const struct hda_fixup alc269_fixups[] = {
        .type = HDA_FIXUP_FUNC,
        .v.func = hda_fixup_thinkpad_acpi,
    },
+   [ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
+       .type = HDA_FIXUP_FUNC,
+       .v.func = alc_fixup_inv_dmic,
+       .chained = true,
+       .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+   },
    [ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
        .type = HDA_FIXUP_PINS,
        .v.pins = (const struct hda_pintbl[]) {
@@ -5324,6 +5331,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
    SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
    SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
    SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
+   SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X),
    SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
    SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
    SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
@@ -5332,6 +5340,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
    SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
    SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
    SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+   SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
    SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
    SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
    SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5457,6 +5466,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
    SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
    SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
    SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+   SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
    SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
    SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
    SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5615,6 +5625,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        {0x12, 0x90a60170},
        {0x14, 0x90170130},
        {0x21, 0x02211040}),
+   SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+       {0x12, 0x90a60170},
+       {0x14, 0x90171130},
+       {0x21, 0x02211040}),
    SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
        {0x12, 0x90a60170},
        {0x14, 0x90170140},
@@ -6552,6 +6566,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
    SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
    SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
    SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
+   SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
    SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
    SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
    SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
sound/soc/codecs/wm5110.c
@@ -360,15 +360,13 @@ static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,

 static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
 {
-   struct reg_sequence clear_pga = {
-       ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
-   };
+   unsigned int reg = ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4;
    int ret;

-   ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
+   ret = regmap_write(arizona->regmap, reg, 0x80);
    if (ret)
        dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
-           clear_pga.reg, ret);
+           reg, ret);

    return ret;
 }
sound/soc/soc-compress.c
@@ -630,6 +630,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
    struct snd_pcm *be_pcm;
    char new_name[64];
    int ret = 0, direction = 0;
+   int playback = 0, capture = 0;

    if (rtd->num_codecs > 1) {
        dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
@@ -641,11 +642,27 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
            rtd->dai_link->stream_name, codec_dai->name, num);

    if (codec_dai->driver->playback.channels_min)
-       direction = SND_COMPRESS_PLAYBACK;
-   else if (codec_dai->driver->capture.channels_min)
-       direction = SND_COMPRESS_CAPTURE;
-   else
+       playback = 1;
+   if (codec_dai->driver->capture.channels_min)
+       capture = 1;
+
+   capture = capture && cpu_dai->driver->capture.channels_min;
+   playback = playback && cpu_dai->driver->playback.channels_min;
+
+   /*
+    * Compress devices are unidirectional so only one of the directions
+    * should be set, check for that (xor)
+    */
+   if (playback + capture != 1) {
+       dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
+               playback, capture);
        return -EINVAL;
+   }
+
+   if(playback)
+       direction = SND_COMPRESS_PLAYBACK;
+   else
+       direction = SND_COMPRESS_CAPTURE;

    compr = kzalloc(sizeof(*compr), GFP_KERNEL);
    if (compr == NULL) {
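Note: the soc-compress hunk enforces that a compress stream is strictly unidirectional on both the codec and CPU sides; "playback + capture != 1" rejects both the none and the both cases in one test. A tiny standalone illustration (names are illustrative):

    #include <stdio.h>

    static int pick_direction(int codec_play, int codec_cap,
                              int cpu_play, int cpu_cap)
    {
        int playback = codec_play && cpu_play;
        int capture  = codec_cap && cpu_cap;

        if (playback + capture != 1)
            return -1;           /* kernel returns -EINVAL here */
        return playback ? 0 : 1; /* 0 = playback, 1 = capture */
    }

    int main(void)
    {
        printf("%d\n", pick_direction(1, 0, 1, 1));  /* 0: playback only */
        printf("%d\n", pick_direction(1, 1, 1, 1));  /* -1: ambiguous */
        return 0;
    }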
sound/usb/card.c
@@ -675,6 +675,8 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)

 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
+   if (atomic_read(&chip->shutdown))
+       return;
    if (atomic_dec_and_test(&chip->active))
        usb_autopm_put_interface(chip->pm_intf);
 }
sound/usb/mixer_quirks.c
@@ -793,7 +793,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
        return 0;

    kcontrol->private_value &= ~(0xff << 24);
-   kcontrol->private_value |= newval;
+   kcontrol->private_value |= (unsigned int)newval << 24;
    err = snd_ni_update_cur_val(list);
    return err < 0 ? err : 1;
 }
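Note: the mixer_quirks bug is a classic masked read-modify-write mistake: the top byte of private_value caches the control's current value, so after clearing bits 24..31 the new value must be shifted back into that byte; OR-ing it in unshifted corrupted the low bits instead. A minimal illustration of the idiom:

    #include <stdio.h>

    static unsigned int store_byte24(unsigned int priv, unsigned char newval)
    {
        priv &= ~(0xffu << 24);               /* clear the cached byte */
        priv |= (unsigned int)newval << 24;   /* the fix: shift before OR */
        return priv;
    }

    int main(void)
    {
        unsigned int priv = 0x11223344;

        printf("0x%08x\n", store_byte24(priv, 0x7f));  /* 0x7f223344 */
        return 0;
    }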
sound/usb/quirks.c
@@ -1269,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
    case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
    case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
    case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+   case USB_ID(0x22d8, 0x0416): /* OPPO HA-1 */
        if (fp->altsetting == 2)
            return SNDRV_PCM_FMTBIT_DSD_U32_BE;
        break;