This is the 4.4.78 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllxlOsACgkQONu9yGCS
 aT4naw//VrWIuIf523IP6egcS+iprNM1lt8HZIzTQ0b2x0qv82UFA9FSs3e97dbG
 LJAUEmHW+oBbm6AekAUrNFF62qaMM5HYxVgGYeiniA1dRvLCDG/OxzxPc0hv6Kmm
 VsbZBlPQEj2B5tqUOkpvQNcqbKpIVglBsK4tjeHE/mRziUkIoPXWKS6Tt7VKGtBa
 gIGY7VASyKhPtxGCdbKzHsO/IGi3cmpwAyhqQDRBR/5Dxy3p9xlQ4gTpobeL+x8z
 A7WHqVNbgfdz21LBii5xE8+GUwiRjlYhxeFJTjhM6wo2/XCtw1FJgc7EMmsbVtbZ
 xYu1/tkYaZGoxOQ7sH5jNgVjE8IcyVimDa5eJ7p7fbh3AzsyXzAJIRc8cS7cL40F
 jLDWrYDnm5t7ziITrAf0uoMmRZLHqS2Bv5sqaoCxR3D51r6LVaNdavD6hYN1CLRA
 fjlDvnvwxbLPI4YPr7PZNu4oSJiawKx9jEBOTFSYu9L1XLvdvdcVU6ULAdpShnLn
 80a+YJmYsNg54im7sxFUw6z87AScznzirIpXEJoUO+Hs0SN9Rq/BQx8cKvVeaMEI
 z53c4Hci45Go/ozZVqCTxGbkQ6tKWIKBSo6kl3xchwms4lmijpWuwbAkDUzECNvv
 0RgyQvNx4d2cRJ8hIVcdVFzp+h+kgNqVQQ2nDld7/1QSZHLPhFo=
 =vrgw
 -----END PGP SIGNATURE-----

Merge 4.4.78 into android-4.4

Changes in 4.4.78
	net_sched: fix error recovery at qdisc creation
	net: sched: Fix one possible panic when no destroy callback
	net/phy: micrel: configure interrupts after autoneg workaround
	ipv6: avoid unregistering inet6_dev for loopback
	net: dp83640: Avoid NULL pointer dereference.
	tcp: reset sk_rx_dst in tcp_disconnect()
	net: prevent sign extension in dev_get_stats()
	bpf: prevent leaking pointer via xadd on unprivileged
	net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish()
	ipv6: dad: don't remove dynamic addresses if link is down
	net: ipv6: Compare lwstate in detecting duplicate nexthops
	vrf: fix bug_on triggered by rx when destroying a vrf
	rds: tcp: use sock_create_lite() to create the accept socket
	brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
	cfg80211: Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE
	cfg80211: Validate frequencies nested in NL80211_ATTR_SCAN_FREQUENCIES
	cfg80211: Check if PMKID attribute is of expected size
	irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
	parisc: Report SIGSEGV instead of SIGBUS when running out of stack
	parisc: use compat_sys_keyctl()
	parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs
	parisc/mm: Ensure IRQs are off in switch_mm()
	tools/lib/lockdep: Reduce MAX_LOCK_DEPTH to avoid overflowing lock_chain::depth
	kernel/extable.c: mark core_kernel_text notrace
	mm/list_lru.c: fix list_lru_count_node() to be race free
	fs/dcache.c: fix spin lockup issue on nlru->lock
	checkpatch: silence perl 5.26.0 unescaped left brace warnings
	binfmt_elf: use ELF_ET_DYN_BASE only for PIE
	arm: move ELF_ET_DYN_BASE to 4MB
	arm64: move ELF_ET_DYN_BASE to 4GB / 4MB
	powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB
	s390: reduce ELF_ET_DYN_BASE
	exec: Limit arg stack to at most 75% of _STK_LIM
	vt: fix unchecked __put_user() in tioclinux ioctls
	mnt: In umount propagation reparent in a separate pass
	mnt: In propagate_umount handle visiting mounts in any order
	mnt: Make propagate_umount less slow for overlapping mount propagation trees
	selftests/capabilities: Fix the test_execve test
	tpm: Get rid of chip->pdev
	tpm: Provide strong locking for device removal
	Add "shutdown" to "struct class".
	tpm: Issue a TPM2_Shutdown for TPM2 devices.
	mm: fix overflow check in expand_upwards()
	crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD
	crypto: atmel - only treat EBUSY as transient if backlog
	crypto: sha1-ssse3 - Disable avx2
	crypto: caam - fix signals handling
	sched/topology: Fix overlapping sched_group_mask
	sched/topology: Optimize build_group_mask()
	PM / wakeirq: Convert to SRCU
	PM / QoS: return -EINVAL for bogus strings
	tracing: Use SOFTIRQ_OFFSET for softirq detection for more accurate results
	KVM: x86: disable MPX if host did not enable MPX XSAVE features
	kvm: vmx: Do not disable intercepts for BNDCFGS
	kvm: x86: Guest BNDCFGS requires guest MPX support
	kvm: vmx: Check value written to IA32_BNDCFGS
	kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS
	Linux 4.4.78

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2017-07-21 09:14:57 +02:00
commit 59ff2e15be
75 changed files with 750 additions and 347 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 77
SUBLEVEL = 78
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This is the base location for PIE (ET_DYN with INTERP) loads. */
#define ELF_ET_DYN_BASE 0x400000UL
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we

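The arm hunk above is easiest to read numerically. A minimal userspace sketch of the before/after values (the 3 GB TASK_SIZE is an assumption for a typical 32-bit arm configuration, not something this diff states):

	#include <stdio.h>

	int main(void)
	{
		unsigned long task_size = 0xC0000000UL;	/* assumed 32-bit arm TASK_SIZE */

		/* old base: two thirds of the way up the address space */
		printf("old ELF_ET_DYN_BASE: %#lx\n", task_size / 3 * 2);	/* 0x80000000 */
		/* new base: a fixed 4 MB, low and out of the way of mmap and stack */
		printf("new ELF_ET_DYN_BASE: %#lx\n", 0x400000UL);
		return 0;
	}

The arm64/powerpc/s390/x86 hunks that follow apply the same idea, using 0x100000000UL (4 GB) for native 64-bit tasks so the whole 32-bit range stays free, and 4 MB for compat tasks.
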
View file

@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#define ELF_ET_DYN_BASE 0x100000000UL
#ifndef __ASSEMBLY__
@ -169,7 +168,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18

View file

@ -39,6 +39,8 @@ struct hppa_dma_ops {
** flush/purge and allocate "regular" cacheable pages for everything.
*/
#define DMA_ERROR_CODE (~(dma_addr_t)0)
#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
break;
}
}
BUG_ON(!dev->platform_data);
return dev->platform_data;
}
#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
#define GET_IOC(dev) ({ \
void *__pdata = parisc_walk_tree(dev); \
__pdata ? HBA_DATA(__pdata)->iommu : NULL; \
})
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;

View file

@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
mtctl(__space_to_prot(context), 8);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
static inline void switch_mm_irqs_off(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk)
{
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)

View file

@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
ENTRY_SAME(keyctl)
ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)

View file

@ -303,7 +303,7 @@ bad_area:
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
address < vma->vm_start || address > vma->vm_end) {
address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;

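The fault-handler fix above is a classic exclusive-bound off-by-one: vm_end is the first address after the VMA, so an address equal to vm_end is outside it. A standalone sketch of the corrected predicate (the struct and values here are illustrative, not kernel code):

	#include <stdio.h>

	struct vma { unsigned long vm_start, vm_end; };

	/* a VMA covers the half-open range [vm_start, vm_end) */
	static int outside(const struct vma *v, unsigned long addr)
	{
		return addr < v->vm_start || addr >= v->vm_end;
	}

	int main(void)
	{
		struct vma v = { 0x1000, 0x2000 };

		printf("%d\n", outside(&v, 0x0fff));	/* 1: below the mapping */
		printf("%d\n", outside(&v, 0x1fff));	/* 0: last mapped byte */
		printf("%d\n", outside(&v, 0x2000));	/* 1: the old '>' test wrongly said 0 here */
		return 0;
	}
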
View file

@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE 0x20000000
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)

View file

@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. 64-bit
tasks are aligned to 4GB. */
#define ELF_ET_DYN_BASE (is_32bit_task() ? \
(STACK_TOP / 3 * 2) : \
(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */

View file

@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;

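The sha1-ssse3 hunk above disables the AVX2 path by prefixing the probe with 'false &&' rather than deleting it, so the code still compiles (and is trivial to re-enable later) but can never run. Short-circuit evaluation does the work; a standalone illustration (not kernel code):

	#include <stdio.h>

	static int probe(void)
	{
		puts("probe ran");
		return 1;
	}

	int main(void)
	{
		if (0 && probe())	/* left operand short-circuits: probe() is never called */
			puts("avx2 path");
		puts("fallback path");
		return 0;
	}
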
View file

@ -245,12 +245,13 @@ extern int force_personality32;
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,

View file

@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)

View file

@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
return ret;
}
bool kvm_mpx_supported(void)
{
return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);
u64 kvm_supported_xcr0(void)
{
u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
if (!kvm_x86_ops->mpx_supported())
if (!kvm_mpx_supported())
xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
return xcr0;
@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
vcpu->arch.eager_fpu = use_eager_fpu();
if (vcpu->arch.eager_fpu)
kvm_x86_ops->fpu_activate(vcpu);
@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */

View file

@ -4,6 +4,7 @@
#include "x86.h"
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@ -134,14 +135,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@ -150,6 +143,14 @@ static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->ebx & bit(X86_FEATURE_MPX));
}
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

View file

@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
if (vmx_mpx_supported())
if (kvm_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
if (vmx_mpx_supported())
if (kvm_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
if (!vmx_mpx_supported())
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
if (!vmx_mpx_supported())
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
if (is_noncanonical_address(data & PAGE_MASK) ||
(data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void)
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
if (!vmx_mpx_supported())
if (!kvm_mpx_supported())
continue;
break;
default:
@ -6253,7 +6257,6 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (vmx_mpx_supported())
if (kvm_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
if (nested_cpu_has_xsaves(vmcs12))
vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);

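The vmx.c hunk above now validates guest writes to IA32_BNDCFGS: reserved bits 2-11 (the 0xffc mask added in the msr-index.h hunk) must be clear and the base address must be canonical. A standalone sketch of that check (48-bit virtual addresses and a 4 KB PAGE_MASK are assumptions here, not taken from the diff):

	#include <stdio.h>

	#define MSR_IA32_BNDCFGS_RSVD 0x00000ffcULL

	/* assumed 48-bit virtual addresses: canonical means sign-extended */
	static int noncanonical(unsigned long long addr)
	{
		return (long long)(addr << 16) >> 16 != (long long)addr;
	}

	static int bndcfgs_valid(unsigned long long data)
	{
		unsigned long long base = data & ~0xfffULL;	/* data & PAGE_MASK */

		return !(data & MSR_IA32_BNDCFGS_RSVD) && !noncanonical(base);
	}

	int main(void)
	{
		printf("%d\n", bndcfgs_valid(0x1000ULL));		/* 1: clean value */
		printf("%d\n", bndcfgs_valid(0x1004ULL));		/* 0: reserved bit set */
		printf("%d\n", bndcfgs_valid(0x8000000000000000ULL));	/* 0: non-canonical base */
		return 0;
	}
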
View file

@ -2094,7 +2094,11 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
if (dev->class && dev->class->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->class->shutdown(dev);
} else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);

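device_shutdown() now checks for a class-level shutdown callback before the bus one. A driver opts in by filling the new struct class member; a minimal sketch of the wiring, mirroring what the TPM hunks below do with tpm_class_shutdown (the foo_* names are illustrative):

	static int foo_class_shutdown(struct device *dev)
	{
		/* quiesce the device before power loss */
		return 0;
	}

	/* at class setup time: */
	foo_class->shutdown = foo_class_shutdown;

Note that when the class hook is set it runs instead of the bus hook, which is why tpm_class_shutdown chains to the bus and driver shutdown routines itself, as the tpm-chip.c hunk below shows.
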
View file

@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;

View file

@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
DEFINE_STATIC_SRCU(wakeup_srcu);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_rcu();
synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@ -331,13 +333,14 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_arm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@ -348,13 +351,14 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_disarm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@ -839,10 +843,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int active = 0;
int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@ -858,7 +862,7 @@ void pm_print_active_wakeup_sources(void)
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
@ -985,8 +989,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@ -1000,7 +1005,7 @@ void pm_wakep_autosleep_enabled(bool set)
}
spin_unlock_irq(&ws->lock);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
int srcuidx;
seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);

View file

@ -36,10 +36,60 @@ static DEFINE_SPINLOCK(driver_lock);
struct class *tpm_class;
dev_t tpm_devt;
/*
* tpm_chip_find_get - return tpm_chip for a given chip number
* @chip_num the device number for the chip
/**
* tpm_try_get_ops() - Get a ref to the tpm_chip
* @chip: Chip to ref
*
* The caller must already have some kind of locking to ensure that chip is
* valid. This function will lock the chip so that the ops member can be
* accessed safely. The locking prevents tpm_chip_unregister from
* completing, so it should not be held for long periods.
*
* Returns -ERRNO if the chip could not be got.
*/
int tpm_try_get_ops(struct tpm_chip *chip)
{
int rc = -EIO;
get_device(&chip->dev);
down_read(&chip->ops_sem);
if (!chip->ops)
goto out_lock;
if (!try_module_get(chip->dev.parent->driver->owner))
goto out_lock;
return 0;
out_lock:
up_read(&chip->ops_sem);
put_device(&chip->dev);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_try_get_ops);
/**
* tpm_put_ops() - Release a ref to the tpm_chip
* @chip: Chip to put
*
* This is the opposite pair to tpm_try_get_ops(). After this returns chip may
* be kfree'd.
*/
void tpm_put_ops(struct tpm_chip *chip)
{
module_put(chip->dev.parent->driver->owner);
up_read(&chip->ops_sem);
put_device(&chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
* tpm_chip_find_get() - return tpm_chip for a given chip number
* @chip_num: id to find
*
* The return'd chip has been tpm_try_get_ops'd and must be released via
* tpm_put_ops
*/
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
@ -49,10 +99,10 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->pdev->driver->owner)) {
/* rcu prevents chip from being free'd */
if (!tpm_try_get_ops(pos))
chip = pos;
break;
}
break;
}
rcu_read_unlock();
return chip;
@ -74,6 +124,41 @@ static void tpm_dev_release(struct device *dev)
kfree(chip);
}
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
*
* Issues a TPM2_Shutdown command prior to loss of power, as required by the
* TPM 2.0 spec.
* Then, calls bus- and device- specific shutdown code.
*
* XXX: This codepath relies on the fact that sysfs is not enabled for
* TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
* has sysfs support enabled before TPM sysfs's implicit locking is fixed.
*/
static int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
down_write(&chip->ops_sem);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
chip->ops = NULL;
up_write(&chip->ops_sem);
}
/* Allow bus- and device-specific code to run. Note: since chip->ops
* is NULL, more-specific shutdown code will not be able to issue TPM
* commands.
*/
if (dev->bus && dev->bus->shutdown)
dev->bus->shutdown(dev);
else if (dev->driver && dev->driver->shutdown)
dev->driver->shutdown(dev);
return 0;
}
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @dev: device to which the chip is associated
@ -94,6 +179,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
init_rwsem(&chip->ops_sem);
INIT_LIST_HEAD(&chip->list);
chip->ops = ops;
@ -112,13 +198,12 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
chip->pdev = dev;
dev_set_drvdata(dev, chip);
chip->dev.class = tpm_class;
chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = chip->pdev;
chip->dev.parent = dev;
#ifdef CONFIG_ACPI
chip->dev.groups = chip->groups;
#endif
@ -133,7 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev);
cdev_init(&chip->cdev, &tpm_fops);
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.owner = dev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
@ -173,6 +258,12 @@ static int tpm_add_char_device(struct tpm_chip *chip)
static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
/* Make the driver uncallable. */
down_write(&chip->ops_sem);
chip->ops = NULL;
up_write(&chip->ops_sem);
device_del(&chip->dev);
}
@ -236,9 +327,8 @@ int tpm_chip_register(struct tpm_chip *chip)
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
&chip->dev.kobj,
"ppi");
rc = __compat_only_sysfs_link_entry_to_kobj(
&chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
if (rc && rc != -ENOENT) {
tpm_chip_unregister(chip);
return rc;
@ -259,6 +349,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
* Once this function returns the driver call backs in 'op's will not be
* running and will no longer start.
*
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
@ -273,7 +366,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
synchronize_rcu();
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
sysfs_remove_link(&chip->pdev->kobj, "ppi");
sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
tpm1_chip_unregister(chip);
tpm_del_char_device(chip);

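The tpm-chip.c changes above replace the old module-refcount-only scheme with an ops rwsem, so a chip can be unregistered even while /dev/tpm0 is held open. Every command is now bracketed with the new get/put pair; a sketch of the pattern (the same shape the tpm-dev.c hunk below uses in tpm_write()):

	if (tpm_try_get_ops(chip))
		return -EPIPE;		/* chip was unregistered; ops is NULL */

	rc = tpm_transmit(chip, buf, bufsiz, 0);

	tpm_put_ops(chip);		/* drops ops_sem, the module ref and the device ref */
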
View file

@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file)
* by the check of is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->pdev, "Another process owns this TPM\n");
dev_dbg(&chip->dev, "Another process owns this TPM\n");
return -EBUSY;
}
@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file)
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
get_device(chip->pdev);
return 0;
}
@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}
/* atomic tpm command send and result receive */
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
*/
if (tpm_try_get_ops(priv->chip)) {
mutex_unlock(&priv->buffer_mutex);
return -EPIPE;
}
out_size = tpm_transmit(priv->chip, priv->data_buffer,
sizeof(priv->data_buffer), 0);
tpm_put_ops(priv->chip);
if (out_size < 0) {
mutex_unlock(&priv->buffer_mutex);
return out_size;
@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file)
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
put_device(priv->chip->pdev);
kfree(priv);
return 0;
}

View file

@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
dev_err(chip->pdev, "Operation Canceled\n");
dev_err(&chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
dev_err(chip->pdev, "Operation Timed out\n");
dev_err(&chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->pdev,
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
if (!(flags & TPM_TRANSMIT_UNLOCKED))
@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
return err;
@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
dev_info(chip->pdev, "Issuing TPM_STARTUP");
dev_info(&chip->dev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
0, NULL);
}
if (rc) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
/* Report adjusted timeouts */
if (chip->vendor.timeout_adjusted) {
dev_info(chip->pdev,
dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@ -612,7 +612,7 @@ duration:
chip->vendor.duration[TPM_MEDIUM] *= 1000;
chip->vendor.duration[TPM_LONG] *= 1000;
chip->vendor.duration_adjusted = true;
dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num)
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
else
rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, hash);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip)
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
dev_info(
&chip->dev, HW_ERR
"TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev)
}
if (rc)
dev_err(chip->pdev,
dev_err(&chip->dev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
dev_warn(chip->pdev, "TPM savestate took %dms\n",
dev_warn(&chip->dev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
err = tpm2_get_random(chip, out, max);
tpm_chip_put(chip);
tpm_put_ops(chip);
return err;
}
@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
num_bytes -= recd;
} while (retries-- && total < max);
tpm_chip_put(chip);
tpm_put_ops(chip);
return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_seal_trusted(chip, payload, options);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);
@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_unseal_trusted(chip, payload, options);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);

View file

@ -284,16 +284,28 @@ static const struct attribute_group tpm_dev_group = {
int tpm_sysfs_add_device(struct tpm_chip *chip)
{
int err;
err = sysfs_create_group(&chip->pdev->kobj,
/* XXX: If you wish to remove this restriction, you must first update
* tpm_sysfs to explicitly lock chip->ops.
*/
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return 0;
err = sysfs_create_group(&chip->dev.parent->kobj,
&tpm_dev_group);
if (err)
dev_err(chip->pdev,
dev_err(&chip->dev,
"failed to create sysfs attributes, %d\n", err);
return err;
}
void tpm_sysfs_del_device(struct tpm_chip *chip)
{
sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
/* The sysfs routines rely on an implicit tpm_try_get_ops, this
* function is called before ops is null'd and the sysfs core
* synchronizes this removal so that no callbacks are running or can
* run again
*/
sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group);
}

View file

@ -171,11 +171,16 @@ enum tpm_chip_flags {
};
struct tpm_chip {
struct device *pdev; /* Device stuff */
struct device dev;
struct cdev cdev;
/* A driver callback under ops cannot be run unless ops_sem is held
* (sometimes implicitly, eg for the sysfs code). ops becomes null
* when the driver is unregistered, see tpm_try_get_ops.
*/
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
unsigned int flags;
int dev_num; /* /dev/tpm# */
@ -201,11 +206,6 @@ struct tpm_chip {
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
static inline void tpm_chip_put(struct tpm_chip *chip)
{
module_put(chip->pdev->driver->owner);
}
static inline int tpm_read_index(int base, int index)
{
outb(index, base);
@ -517,6 +517,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
struct tpm_chip *tpm_chip_find_get(int chip_num);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
extern int tpm_chip_register(struct tpm_chip *chip);

View file

@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
if (rc) {
dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
handle);
return;
}
@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
"flushing context");
if (rc)
dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
tpm_buf_destroy(&buf);
@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
* except print the error code on a system failure.
*/
if (rc < 0)
dev_warn(chip->pdev, "transmit returned %d while stopping the TPM",
dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
rc);
}
EXPORT_SYMBOL_GPL(tpm2_shutdown);
@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
* immediately. This is a workaround for that.
*/
if (rc == TPM2_RC_TESTING) {
dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
rc = 0;
}

View file

@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (i = 0; i < 6; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading header\n");
dev_err(&chip->dev, "error reading header\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size = be32_to_cpu(*native_size);
if (count < size) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading data\n");
dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
}
@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (; i < size; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading data\n");
dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
status = ioread8(chip->vendor.iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
dev_err(chip->pdev, "data available is stuck\n");
dev_err(&chip->dev, "data available is stuck\n");
return -EIO;
}
@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
int i;
dev_dbg(chip->pdev, "tpm_atml_send:\n");
dev_dbg(&chip->dev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
iowrite8(buf[i], chip->vendor.iobase);
}

View file

@ -52,7 +52,7 @@ struct priv_data {
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
priv->len = 0;
@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
status = i2c_master_send(client, buf, len);
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ENOMEM;
if (priv->len >= expected_len) {
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
rc = i2c_master_recv(client, buf, expected_len);
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;

View file

@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
dev_err(chip->pdev, "Unable to read header\n");
dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
dev_err(chip->pdev, "Unable to read remainder of result\n");
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->pdev, "Error left over data\n");
dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}

View file

@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
dev_err(chip->pdev, "%s() error return %d\n", __func__,
dev_err(&chip->dev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
return 0;
} while (time_before(jiffies, stop));
}
dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
&chip->vendor.read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct device *dev = chip->pdev;
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
break;
}
i2c_nuvoton_ready(chip);
dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
return size;
}
@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct device *dev = chip->pdev;
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;

View file

@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip)
static void tpm_wtx_abort(struct tpm_chip *chip)
{
dev_info(chip->pdev, "Aborting WTX\n");
dev_info(&chip->dev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@ -257,7 +257,7 @@ recv_begin:
}
if (buf[0] != TPM_VL_VER) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@ -272,7 +272,7 @@ recv_begin:
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
dev_err(chip->pdev, "Error handling on vendor layer!\n");
dev_err(&chip->dev, "Error handling on vendor layer!\n");
return -EIO;
}
@ -284,7 +284,7 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX) {
dev_info(chip->pdev, "WTX-package received\n");
dev_info(&chip->dev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@ -295,14 +295,14 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
dev_info(chip->pdev, "WTX-abort acknowledged\n");
dev_info(&chip->dev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
dev_err(chip->pdev, "ERROR-package received:\n");
dev_err(&chip->dev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
dev_err(chip->pdev,
dev_err(&chip->dev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
ret = empty_fifo(chip, 1);
if (ret) {
dev_err(chip->pdev, "Timeout while clearing FIFO\n");
dev_err(&chip->dev, "Timeout while clearing FIFO\n");
return -EIO;
}

View file

@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
}
while (time_before(jiffies, stop));
dev_info(chip->pdev, "wait for ready failed\n");
dev_info(&chip->dev, "wait for ready failed\n");
return -EBUSY;
}
@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
dev_err(chip->pdev, "F0 timeout\n");
dev_err(&chip->dev, "F0 timeout\n");
return -EIO;
}
if ((data =
inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
dev_err(chip->pdev, "not in normal mode (0x%x)\n",
dev_err(&chip->dev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"OBF timeout (while reading data)\n");
return -EIO;
}
@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
dev_err(chip->pdev, "F0 not set\n");
dev_err(&chip->dev, "F0 not set\n");
return -EIO;
}
if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev, "IBF timeout\n");
dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
dev_err(chip->pdev, "IBR timeout\n");
dev_err(&chip->dev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"IBF timeout (while writing data)\n");
return -EIO;
}
@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev, "IBF timeout\n");
dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);

View file

@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
dev_err(chip->pdev, "Unable to read header\n");
dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
dev_err(chip->pdev, "Unable to read remainder of result\n");
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
&chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->pdev, "Error left over data\n");
dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
devm_free_irq(chip->pdev, chip->vendor.irq, chip);
devm_free_irq(&chip->dev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
msleep(1);
if (!priv->irq_tested) {
disable_interrupts(chip);
dev_err(chip->pdev,
dev_err(&chip->dev,
FW_BUG "TPM interrupt not working, polling instead\n");
}
priv->irq_tested = true;
@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip)
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
dev_info(chip->pdev, "Detected an iTPM.\n");
dev_info(&chip->dev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, i, tis_int_probe, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"Unable to request irq: %d for probe\n",
i);
continue;
@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;

View file

@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
if (err1 == -EINPROGRESS || err1 == -EBUSY)
if (err1 == -EINPROGRESS ||
(err1 == -EBUSY && (ahash_request_flags(req) &
CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*

View file

@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,

View file

@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",

View file

@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;

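The talitos bump is simple arithmetic: the old 96-byte cap was smaller than what the AEAD algorithms may legitimately request, since an HMAC key can be as long as the hash block size. With the usual header values (assumed here, not shown in the diff) the new cap works out to 160 bytes:

	#include <stdio.h>

	#define AES_MAX_KEY_SIZE	32	/* AES-256 */
	#define SHA512_BLOCK_SIZE	128	/* HMAC-SHA384/512 keys up to this length */

	int main(void)
	{
		printf("old TALITOS_MAX_KEY_SIZE: 96\n");
		printf("new TALITOS_MAX_KEY_SIZE: %d\n",
		       AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE);	/* 160 */
		return 0;
	}

The companion ablkcipher_setkey() hunk adds the matching bounds check, so an oversized key is now rejected with -EINVAL instead of overflowing ctx->key.
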
View file

@ -632,6 +632,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
int enabled;
u64 val;
if (cpu >= nr_cpu_ids)
return -EINVAL;
if (gic_irq_in_rdist(d))
return -EINVAL;

View file

@ -907,7 +907,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
skb_complete_tx_timestamp(skb, NULL);
kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;

View file

@ -539,6 +539,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
}
return 0;

View file

@ -733,15 +733,15 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct slave_queue *queue = &vrf->queue;
struct list_head *head = &queue->all_slaves;
struct slave *slave, *next;
// struct slave_queue *queue = &vrf->queue;
// struct list_head *head = &queue->all_slaves;
// struct slave *slave, *next;
vrf_rtable_destroy(vrf);
vrf_rt6_destroy(vrf);
list_for_each_entry_safe(slave, next, head, list)
vrf_del_slave(dev, slave->dev);
// list_for_each_entry_safe(slave, next, head, list)
// vrf_del_slave(dev, slave->dev);
free_percpu(dev->dstats);
dev->dstats = NULL;
@ -914,6 +914,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
struct net_vrf *vrf = netdev_priv(dev);
struct slave_queue *queue = &vrf->queue;
struct list_head *all_slaves = &queue->all_slaves;
struct slave *slave, *next;
list_for_each_entry_safe(slave, next, all_slaves, list)
vrf_del_slave(dev, slave->dev);
unregister_netdevice_queue(dev, head);
}

View file

@ -4472,6 +4472,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
brcmf_err("invalid action frame length\n");
err = -EINVAL;
goto exit;
}
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");

View file

@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
BUG_ON(size <= 0);
@ -805,6 +807,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@ -908,6 +914,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@ -980,6 +988,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);

View file

@ -154,7 +154,10 @@ struct dino_device
};
/* Looks nice and keeps the compiler happy */
#define DINO_DEV(d) ((struct dino_device *) d)
#define DINO_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct dino_device *)__pdata; })
/*

@@ -111,8 +111,10 @@ static u32 lba_t32;
/* Looks nice and keeps the compiler happy */
#define LBA_DEV(d) ((struct lba_device *) (d))
#define LBA_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA
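
Both DINO_DEV() and LBA_DEV() above trade a bare cast for a GCC statement expression, so a NULL drvdata pointer trips a check at the macro call site instead of an opaque fault on a later dereference. The pattern in a compilable sketch, with assert() standing in for BUG_ON():

#include <assert.h>

struct dino_device { int hpa; };

/* Evaluate d once, refuse NULL, then cast; mirrors the new macros. */
#define DEV_CAST(type, d) ({		\
	void *__pdata = (d);		\
	assert(__pdata != NULL);	\
	(type *)__pdata; })

int read_hpa(void *drvdata)
{
	struct dino_device *dev = DEV_CAST(struct dino_device, drvdata);
	return dev->hpa;
}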

@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;

@@ -2708,13 +2708,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2723,7 +2723,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {

@@ -905,17 +905,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
/*
* If we are loading ET_EXEC or we have already performed
* the ET_DYN load_addr calculations, proceed normally.
*/
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
load_bias = ELF_ET_DYN_BASE - vaddr;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
load_bias = ELF_PAGESTART(load_bias);
/*
* This logic is run once for the first LOAD Program
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers, and to calculate the entire
* size of the ELF mapping (total_size). (Note that
* load_addr_set is set to true later once the
* initial mapping is performed.)
*
* There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with INTERP)
* and loaders (ET_DYN without INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
* for ET_EXEC which does not have a randomized
* position). For example to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
* they cannot share the same load range. Sufficient
* room for the brk must be allocated with the
* loader as well, since brk must be available with
* the loader.
*
* Therefore, programs are loaded offset from
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED).
*/
if (elf_interpreter) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
elf_flags |= MAP_FIXED;
} else
load_bias = 0;
/*
* Since load_bias is used for all subsequent loading
* calculations, we must lower it by the first vaddr
* so that the remaining calculations based on the
* ELF vaddrs will be correctly offset. The result
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
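
The closing arithmetic is the subtle part: the chosen base is lowered by the first segment's p_vaddr and then truncated to a page boundary, so the eventual mapping address comes out page-aligned even for an unaligned vaddr. A worked example (PAGE_SIZE and the sample values are illustrative; 0x400000 echoes the new 4 MB ELF_ET_DYN_BASE on arm):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ELF_PAGESTART(v) ((v) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long base = 0x400000UL;	/* stands in for ELF_ET_DYN_BASE */
	unsigned long vaddr = 0x1040UL;		/* first LOAD segment p_vaddr */
	unsigned long load_bias = ELF_PAGESTART(base - vaddr);	/* 0x3fe000 */

	printf("segment maps at %#lx\n", ELF_PAGESTART(load_bias + vaddr));
	return 0;	/* prints 0x3ff000, a page boundary */
}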

@@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb)
LIST_HEAD(dispose);
freed = list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, UINT_MAX);
dentry_lru_isolate_shrink, &dispose, 1024);
this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
} while (freed > 0);
cond_resched();
} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
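
Two things change together here: the walk is capped at 1024 entries per pass so nlru->lock is never held across an unbounded scan, and the loop now terminates on "nothing left on the LRU" rather than "freed something last time", with a reschedule point between batches. The control flow as a runnable toy:

#include <stdio.h>

#define BATCH 1024UL	/* mirrors the new fixed walk limit */

static unsigned long lru_items = 10000;	/* toy stand-in for the dentry LRU */

static unsigned long shrink_batch(unsigned long max)
{
	unsigned long n = lru_items < max ? lru_items : max;
	lru_items -= n;	/* isolate-and-dispose collapsed into one step */
	return n;
}

int main(void)
{
	while (lru_items > 0) {	/* like list_lru_count() > 0 */
		printf("freed %lu, %lu left\n", shrink_batch(BATCH), lru_items);
		/* cond_resched() sits here in the kernel loop */
	}
	return 0;
}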

@@ -206,8 +206,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
unsigned long ptr_size;
struct rlimit *rlim;
unsigned long ptr_size, limit;
/*
* Since the stack will hold pointers to the strings, we
@@ -236,14 +235,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;
/*
* Limit to 1/4-th the stack size for the argv+env strings.
* Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
* (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
rlim = current->signal->rlim;
if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
limit = _STK_LIM / 4 * 3;
limit = min(limit, rlimit(RLIMIT_STACK) / 4);
if (size > limit)
goto fail;
}
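
The new bound is plain arithmetic: a quarter of RLIMIT_STACK, clamped to three quarters of _STK_LIM (8 MiB), so an enormous or unlimited rlimit can no longer let the argv+envp strings eat nearly the whole default stack. A runnable check of the math:

#include <stdio.h>
#include <sys/resource.h>

#define STK_LIM (8UL * 1024 * 1024)	/* _STK_LIM, the default stack size */

int main(void)
{
	struct rlimit r;
	unsigned long limit = STK_LIM / 4 * 3;	/* 6 MiB ceiling */

	if (getrlimit(RLIMIT_STACK, &r) == 0 && r.rlim_cur / 4 < limit)
		limit = r.rlim_cur / 4;

	printf("argv+envp capped at %lu bytes\n", limit);
	return 0;
}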

@@ -57,6 +57,7 @@ struct mount {
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;

@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif

@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
static inline struct mount *last_slave(struct mount *p)
{
return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
}
}
static struct mount *skip_propagation_subtree(struct mount *m,
struct mount *origin)
{
/*
* Advance m such that propagation_next will not return
* the slaves of m.
*/
if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
m = last_slave(m);
return m;
}
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -415,68 +433,107 @@ void propagate_mount_unlock(struct mount *mnt)
}
}
/*
* Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
*/
static void mark_umount_candidates(struct mount *mnt)
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
BUG_ON(parent == mnt);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
continue;
if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
SET_MNT_MARK(child);
}
}
CLEAR_MNT_MARK(mnt);
mnt->mnt.mnt_flags |= MNT_UMOUNT;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_umounting);
list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
static void __propagate_umount(struct mount *mnt)
static bool __propagate_umount(struct mount *mnt,
struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
bool progress = false;
struct mount *child;
BUG_ON(parent == mnt);
/*
* The state of the parent won't change if this mount is
* already unmounted or marked as without children.
*/
if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
goto out;
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *topper;
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
/*
* umount the child only if the child has no children
* and the child is marked safe to unmount.
*/
if (!child || !IS_MNT_MARKED(child))
/* Verify topper is the only grandchild that has not been
* speculatively unmounted.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
CLEAR_MNT_MARK(child);
if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
continue;
/* Found a mounted child */
goto children;
}
/* If there is exactly one mount covering all of child
* replace child with that mount.
*/
topper = find_topper(child);
if (topper)
mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
topper);
/* Mark mounts that can be unmounted if not locked */
SET_MNT_MARK(mnt);
progress = true;
if (list_empty(&child->mnt_mounts)) {
list_del_init(&child->mnt_child);
child->mnt.mnt_flags |= MNT_UMOUNT;
list_move_tail(&child->mnt_list, &mnt->mnt_list);
/* If a mount is without children and not locked umount it. */
if (!IS_MNT_LOCKED(mnt)) {
umount_one(mnt, to_umount);
} else {
children:
list_move_tail(&mnt->mnt_umounting, to_restore);
}
out:
return progress;
}
static void umount_list(struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *mnt, *child, *tmp;
list_for_each_entry(mnt, to_umount, mnt_list) {
list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
/* topper? */
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
list_move_tail(&child->mnt_umounting, to_restore);
else
umount_one(child, to_umount);
}
}
}
static void restore_mounts(struct list_head *to_restore)
{
/* Restore mounts to a clean working state */
while (!list_empty(to_restore)) {
struct mount *mnt, *parent;
struct mountpoint *mp;
mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
CLEAR_MNT_MARK(mnt);
list_del_init(&mnt->mnt_umounting);
/* Should this mount be reparented? */
mp = mnt->mnt_mp;
parent = mnt->mnt_parent;
while (parent->mnt.mnt_flags & MNT_UMOUNT) {
mp = parent->mnt_mp;
parent = parent->mnt_parent;
}
if (parent != mnt->mnt_parent)
mnt_change_mountpoint(parent, mp, mnt);
}
}
static void cleanup_umount_visitations(struct list_head *visited)
{
while (!list_empty(visited)) {
struct mount *mnt =
list_first_entry(visited, struct mount, mnt_umounting);
list_del_init(&mnt->mnt_umounting);
}
}
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.
@@ -487,12 +544,69 @@ static void __propagate_umount(struct mount *mnt)
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
LIST_HEAD(to_restore);
LIST_HEAD(to_umount);
LIST_HEAD(visited);
list_for_each_entry_reverse(mnt, list, mnt_list)
mark_umount_candidates(mnt);
/* Find candidates for unmounting */
list_for_each_entry_reverse(mnt, list, mnt_list) {
struct mount *parent = mnt->mnt_parent;
struct mount *m;
/*
* If this mount has already been visited it is known that its
* entire peer group and all of their slaves in the propagation
* tree for the mountpoint have already been visited and there is
* no need to visit them again.
*/
if (!list_empty(&mnt->mnt_umounting))
continue;
list_add_tail(&mnt->mnt_umounting, &visited);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child)
continue;
if (!list_empty(&child->mnt_umounting)) {
/*
* If the child has already been visited it is
* known that its entire peer group and all of
* their slaves in the propagation tree for the
* mountpoint have already been visited and there
* is no need to visit this subtree again.
*/
m = skip_propagation_subtree(m, parent);
continue;
} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
/*
* We have come across a partially unmounted
* mount in the list that has not been visited yet.
* Remember it has been visited and continue
* about our merry way.
*/
list_add_tail(&child->mnt_umounting, &visited);
continue;
}
/* Check the child and parents while progress is made */
while (__propagate_umount(child,
&to_umount, &to_restore)) {
/* Is the parent a umount candidate? */
child = child->mnt_parent;
if (list_empty(&child->mnt_umounting))
break;
}
}
}
umount_list(&to_umount, &to_restore);
restore_mounts(&to_restore);
cleanup_umount_visitations(&visited);
list_splice_tail(&to_umount, list);
list_for_each_entry(mnt, list, mnt_list)
__propagate_umount(mnt);
return 0;
}

@@ -368,6 +368,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
* @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device that belongs to this class.
* @pm: The default device power management operations of this class.
@@ -396,6 +397,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);

@@ -44,6 +44,7 @@ struct list_lru_node {
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {

@@ -21,6 +21,7 @@ struct route_info {
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -209,4 +210,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
return daddr;
}
static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
{
return a->dst.dev == b->dst.dev &&
a->rt6i_idev == b->rt6i_idev &&
ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
!lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
}
#endif

@@ -765,6 +765,11 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);

@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
return 0;
}
int core_kernel_text(unsigned long addr)
int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)

@@ -6386,6 +6386,9 @@ enum s_alloc {
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
* Only CPUs that can arrive at this group should be considered to continue
* balancing.
*
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
@@ -6397,18 +6400,31 @@
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
const struct cpumask *span = sched_domain_span(sd);
const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
for_each_cpu(i, span) {
for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
/*
* Can happen in the asymmetric case, where these siblings are
* unused. The mask will not be empty because those CPUs that
* do have the top domain _should_ span the domain.
*/
if (!sibling->child)
continue;
/* If we would not end up here, we can't continue from here */
if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
cpumask_set_cpu(i, sched_group_mask(sg));
}
/* We must not have empty masks here */
WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}
/*

@@ -1681,7 +1681,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
TRACE_FLAG_IRQS_NOSUPPORT |
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
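
This one-bit change matters because SOFTIRQ_MASK covers the whole eight-bit softirq field of preempt_count, which local_bh_disable() also bumps, while SOFTIRQ_OFFSET is just the low bit of that field, set only while a softirq is actually being serviced. Testing against the mask made tracing tag plain BH-disabled sections as softirq context. A demo with the same bit layout (constants mirror the 4.4 preempt_count arrangement):

#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)		/* serving a softirq */
#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)	/* field incl. bh-disable depth */

int main(void)
{
	/* local_bh_disable() adds 2 * SOFTIRQ_OFFSET in this era */
	unsigned long pc = 2 * SOFTIRQ_OFFSET;

	printf("mask test:   %d (false positive)\n", !!(pc & SOFTIRQ_MASK));
	printf("offset test: %d\n", !!(pc & SOFTIRQ_OFFSET));
	return 0;
}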

@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_add_tail(item, &l->list);
l->nr_items++;
nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
l = list_lru_from_kmem(nlru, item);
list_del_init(item);
l->nr_items--;
nlru->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
long count = 0;
int memcg_idx;
struct list_lru_node *nlru;
count += __list_lru_count_one(lru, nid, -1);
if (list_lru_memcg_aware(lru)) {
for_each_memcg_cache_index(memcg_idx)
count += __list_lru_count_one(lru, nid, memcg_idx);
}
return count;
nlru = &lru->node[nid];
return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
@@ -226,6 +223,7 @@ restart:
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
isolated++;
nlru->nr_items--;
/*
* If the lru lock has been dropped, our list
* traversal is now invalid and so we have to
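
The theme of the list_lru changes: spend one extra counter update under the lock every add, del, and isolate already takes, and list_lru_count_node() collapses from a racy walk over per-memcg lists into a single read. A toy equivalent (hypothetical structure, not the kernel's):

struct item {
	struct item *next;
};

struct lru {
	struct item *head;
	long nr_items;	/* maintained beside every list operation */
};

static void lru_add(struct lru *l, struct item *it)
{
	it->next = l->head;
	l->head = it;
	l->nr_items++;	/* one extra write under the same lock */
}

static long lru_count(const struct lru *l)
{
	return l->nr_items;	/* O(1); no traversal, no per-bucket races */
}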

@@ -2197,7 +2197,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
if (address >= TASK_SIZE)
if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
address += PAGE_SIZE;
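
The extra masking guards the corner where TASK_SIZE is not a multiple of PAGE_SIZE (parisc is the configuration that motivated the fix): address has already been rounded down, so the old comparison can pass for the topmost page and the following += PAGE_SIZE wraps or lands past the limit. Worked numbers in a 32-bit sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint32_t task_size = 0xFFFFF123u;		/* hypothetical unaligned limit */
	uint32_t address   = 0xFFFFFFFFu & PAGE_MASK;	/* 0xFFFFF000 */

	printf("old check rejects: %d\n", address >= task_size);		/* 0 */
	printf("new check rejects: %d\n", address >= (task_size & PAGE_MASK));	/* 1 */

	address += PAGE_SIZE;	/* old path: wraps to 0, overflow undetected */
	printf("address after bump: %#x\n", address);
	return 0;
}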

@@ -4375,6 +4375,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static void napi_skb_free_stolen_head(struct sk_buff *skb)
{
skb_dst_drop(skb);
kmem_cache_free(skbuff_head_cache, skb);
}
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
@@ -4388,12 +4394,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
break;
case GRO_MERGED_FREE:
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
skb_dst_drop(skb);
kmem_cache_free(skbuff_head_cache, skb);
} else {
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
napi_skb_free_stolen_head(skb);
else
__kfree_skb(skb);
}
break;
case GRO_HELD:
@@ -4459,10 +4463,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
break;
case GRO_DROP:
case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
case GRO_MERGED_FREE:
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
napi_skb_free_stolen_head(skb);
else
napi_reuse_skb(napi, skb);
break;
case GRO_MERGED:
break;
}
@@ -7052,8 +7062,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
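
The casts matter on 32-bit builds, where atomic_long_read() yields a 32-bit signed long: adding it straight into the u64 counters sign-extends, so any sample with bit 31 set inflates the sum by 0xffffffff00000000. A portable demonstration with fixed-width types standing in for the 32-bit long:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t storage;
	int32_t dropped = INT32_MIN;	/* counter value with bit 31 set */

	storage = 0;
	storage += dropped;		/* sign-extended to 64 bits */
	printf("without cast: %#llx\n", (unsigned long long)storage);

	storage = 0;
	storage += (uint32_t)dropped;	/* what the (unsigned long) cast does */
	printf("with cast:    %#llx\n", (unsigned long long)storage);
	return 0;
}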

@@ -2263,6 +2263,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
dst_release(sk->sk_rx_dst);
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

@@ -1799,17 +1799,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
addrconf_del_dad_work(ifp);
ifp->flags |= IFA_F_TENTATIVE;
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
spin_unlock_bh(&ifp->lock);
if (dad_failed)
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
} else if (ifp->flags&IFA_F_TEMPORARY) {
if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
ifpub = ifp->ifpub;
@@ -1822,6 +1812,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
spin_lock_bh(&ifp->lock);
addrconf_del_dad_work(ifp);
ifp->flags |= IFA_F_TENTATIVE;
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
spin_unlock_bh(&ifp->lock);
if (dad_failed)
ipv6_ifa_notify(0, ifp);
in6_ifa_put(ifp);
} else {
ipv6_del_addr(ifp);
}
@@ -3195,6 +3195,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct inet6_dev *idev = __in6_dev_get(dev);
struct net *net = dev_net(dev);
int run_pending = 0;
int err;
@@ -3210,7 +3211,7 @@
case NETDEV_CHANGEMTU:
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
if (dev->mtu < IPV6_MIN_MTU) {
addrconf_ifdown(dev, 1);
addrconf_ifdown(dev, dev != net->loopback_dev);
break;
}
@@ -3323,7 +3324,7 @@
* IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
addrconf_ifdown(dev, 1);
addrconf_ifdown(dev, dev != net->loopback_dev);
}
break;

@@ -767,10 +767,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
goto next_iter;
}
if (iter->dst.dev == rt->dst.dev &&
iter->rt6i_idev == rt->rt6i_idev &&
ipv6_addr_equal(&iter->rt6i_gateway,
&rt->rt6i_gateway)) {
if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))

@@ -2825,17 +2825,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
struct rt6_info *rtnh;
int err = -EEXIST;
list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
rtnh = nh->rt6_info;
if (rtnh->dst.dev == rt->dst.dev &&
rtnh->rt6i_idev == rt->rt6i_idev &&
ipv6_addr_equal(&rtnh->rt6i_gateway,
&rt->rt6i_gateway))
if (rt6_duplicate_nexthop(nh->rt6_info, rt))
return err;
}

@@ -78,7 +78,7 @@ int rds_tcp_accept_one(struct socket *sock)
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp;
ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)

@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
return sch;
}
/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
if (ops->destroy)
ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
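
This hunk sets the cleanup contract that the hhf, mq, mqprio and sfq changes below rely on: when ->init() fails, the core calls ->destroy() exactly once, so init implementations must just return the error and never tear the qdisc down themselves, or it gets destroyed twice. The ownership rule in miniature (hypothetical ops, not the real qdisc API):

#include <stddef.h>

struct qd_ops {
	int  (*init)(void **priv);
	void (*destroy)(void *priv);
};

/* The creator owns failure cleanup: init only reports the error. */
static void *qd_create(const struct qd_ops *ops)
{
	void *priv = NULL;

	if (ops->init(&priv) == 0)
		return priv;
	if (ops->destroy)	/* called here, and only here, on failure */
		ops->destroy(priv);
	return NULL;
}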

@@ -636,7 +636,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
sizeof(u32));
if (!q->hhf_arrays[i]) {
hhf_destroy(sch);
/* Note: hhf_destroy() will be called
* by our caller.
*/
return -ENOMEM;
}
}
@@ -647,7 +649,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
BITS_PER_BYTE);
if (!q->hhf_valid_bits[i]) {
hhf_destroy(sch);
/* Note: hhf_destroy() will be called
* by our caller.
*/
return -ENOMEM;
}
}

@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
if (priv->qdiscs == NULL)
if (!priv->qdiscs)
return -ENOMEM;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
goto err;
if (!qdisc)
return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
return 0;
err:
mq_destroy(sch);
return -ENOMEM;
}
static void mq_attach(struct Qdisc *sch)

@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
/* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
if (priv->qdiscs == NULL) {
err = -ENOMEM;
goto err;
}
if (!priv->qdiscs)
return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
if (qdisc == NULL) {
err = -ENOMEM;
goto err;
}
if (!qdisc)
return -ENOMEM;
priv->qdiscs[i] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
priv->hw_owned = 1;
err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
if (err)
goto err;
return err;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
sch->flags |= TCQ_F_MQROOT;
return 0;
err:
mqprio_destroy(sch);
return err;
}
static void mqprio_attach(struct Qdisc *sch)

@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
if (!q->ht || !q->slots) {
sfq_destroy(sch);
/* Note: sfq_destroy() will be called by our caller */
return -ENOMEM;
}
for (i = 0; i < q->divisor; i++)
q->ht[i] = SFQ_EMPTY_SLOT;

@@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
.len = WLAN_PMKID_LEN },
[NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
[NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -5705,6 +5705,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
nla_for_each_nested(attr1, freqs, tmp1)
if (nla_len(attr1) != sizeof(u32))
return 0;
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
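
The added pass makes the same point as the PMKID policy entry above: never read a netlink attribute before checking its length. A generic type-length-value check in the same spirit (standalone and deliberately simplified; real netlink framing differs):

#include <stdint.h>
#include <string.h>

/* Each element: [type byte][len byte][payload]; every payload must be a u32. */
static int sum_u32_attrs(const uint8_t *buf, size_t len, uint32_t *sum)
{
	size_t off = 0;

	*sum = 0;
	while (off + 2 <= len) {
		uint8_t alen = buf[off + 1];
		uint32_t v;

		if (alen != sizeof(v) || off + 2 + alen > len)
			return -1;	/* malformed element: reject the lot */
		memcpy(&v, buf + off + 2, sizeof(v));
		*sum += v;
		off += 2 + alen;
	}
	return 0;
}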

@@ -3253,7 +3253,7 @@ sub process {
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
$fixedline =~ s/^(.\s*){\s*/$1/;
$fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
@@ -3603,7 +3603,7 @@ sub process {
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
$fixedline =~ s/^(.\s*){\s*/$1\t/;
$fixedline =~ s/^(.\s*)\{\s*/$1\t/;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
@@ -4092,7 +4092,7 @@ sub process {
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
}
}

@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>
#define MAX_LOCK_DEPTH 2000UL
#define MAX_LOCK_DEPTH 255UL
#define asmlinkage
#define __visible

@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
if (umount2(".", MNT_DETACH) != 0)
err(1, "detach private tmpfs");
}
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
}
}
capng_get_caps_process();
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
false, false, true, false);
false, false, true, false);
if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");