Merge android-4.4@59ff2e1 (v4.4.78) into msm-4.4

* refs/heads/tmp-59ff2e1
  Linux 4.4.78
  kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS
  kvm: vmx: Check value written to IA32_BNDCFGS
  kvm: x86: Guest BNDCFGS requires guest MPX support
  kvm: vmx: Do not disable intercepts for BNDCFGS
  KVM: x86: disable MPX if host did not enable MPX XSAVE features
  tracing: Use SOFTIRQ_OFFSET for softirq dectection for more accurate results
  PM / QoS: return -EINVAL for bogus strings
  PM / wakeirq: Convert to SRCU
  sched/topology: Optimize build_group_mask()
  sched/topology: Fix overlapping sched_group_mask
  crypto: caam - fix signals handling
  crypto: sha1-ssse3 - Disable avx2
  crypto: atmel - only treat EBUSY as transient if backlog
  crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD
  mm: fix overflow check in expand_upwards()
  tpm: Issue a TPM2_Shutdown for TPM2 devices.
  Add "shutdown" to "struct class".
  tpm: Provide strong locking for device removal
  tpm: Get rid of chip->pdev
  selftests/capabilities: Fix the test_execve test
  mnt: Make propagate_umount less slow for overlapping mount propagation trees
  mnt: In propgate_umount handle visiting mounts in any order
  mnt: In umount propagation reparent in a separate pass
  vt: fix unchecked __put_user() in tioclinux ioctls
  exec: Limit arg stack to at most 75% of _STK_LIM
  s390: reduce ELF_ET_DYN_BASE
  powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB
  arm64: move ELF_ET_DYN_BASE to 4GB / 4MB
  arm: move ELF_ET_DYN_BASE to 4MB
  binfmt_elf: use ELF_ET_DYN_BASE only for PIE
  checkpatch: silence perl 5.26.0 unescaped left brace warnings
  fs/dcache.c: fix spin lockup issue on nlru->lock
  mm/list_lru.c: fix list_lru_count_node() to be race free
  kernel/extable.c: mark core_kernel_text notrace
  tools/lib/lockdep: Reduce MAX_LOCK_DEPTH to avoid overflowing lock_chain/: Depth
  parisc/mm: Ensure IRQs are off in switch_mm()
  parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs
  parisc: use compat_sys_keyctl()
  parisc: Report SIGSEGV instead of SIGBUS when running out of stack
  irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
  cfg80211: Check if PMKID attribute is of expected size
  cfg80211: Validate frequencies nested in NL80211_ATTR_SCAN_FREQUENCIES
  cfg80211: Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE
  brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
  rds: tcp: use sock_create_lite() to create the accept socket
  vrf: fix bug_on triggered by rx when destroying a vrf
  net: ipv6: Compare lwstate in detecting duplicate nexthops
  ipv6: dad: don't remove dynamic addresses if link is down
  net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish()
  bpf: prevent leaking pointer via xadd on unpriviledged
  net: prevent sign extension in dev_get_stats()
  tcp: reset sk_rx_dst in tcp_disconnect()
  net: dp83640: Avoid NULL pointer dereference.
  ipv6: avoid unregistering inet6_dev for loopback
  net/phy: micrel: configure intterupts after autoneg workaround
  net: sched: Fix one possible panic when no destroy callback
  net_sched: fix error recovery at qdisc creation
  ANDROID: android-verity: mark dev as rw for linear target
  ANDROID: sdcardfs: Remove unnecessary lock
  ANDROID: binder: don't check prio permissions on restore.
  Add BINDER_GET_NODE_DEBUG_INFO ioctl
  UPSTREAM: cpufreq: schedutil: Trace frequency only if it has changed
  UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
  UPSTREAM: cpufreq: schedutil: Refactor sugov_next_freq_shared()
  UPSTREAM: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
  UPSTREAM: cpufreq: schedutil: Pass sg_policy to get_next_freq()
  UPSTREAM: cpufreq: schedutil: move cached_raw_freq to struct sugov_policy
  UPSTREAM: cpufreq: schedutil: Rectify comment in sugov_irq_work() function
  UPSTREAM: cpufreq: schedutil: irq-work and mutex are only used in slow path
  UPSTREAM: cpufreq: schedutil: enable fast switch earlier
  UPSTREAM: cpufreq: schedutil: Avoid indented labels
  Linux 4.4.77
  saa7134: fix warm Medion 7134 EEPROM read
  x86/mm/pat: Don't report PAT on CPUs that don't support it
  ext4: check return value of kstrtoull correctly in reserved_clusters_store
  staging: comedi: fix clean-up of comedi_class in comedi_init()
  staging: vt6556: vnt_start Fix missing call to vnt_key_init_table.
  tcp: fix tcp_mark_head_lost to check skb len before fragmenting
  md: fix super_offset endianness in super_1_rdev_size_change
  md: fix incorrect use of lexx_to_cpu in does_sb_need_changing
  perf tools: Use readdir() instead of deprecated readdir_r() again
  perf tests: Remove wrong semicolon in while loop in CQM test
  perf trace: Do not process PERF_RECORD_LOST twice
  perf dwarf: Guard !x86_64 definitions under #ifdef else clause
  perf pmu: Fix misleadingly indented assignment (whitespace)
  perf annotate browser: Fix behaviour of Shift-Tab with nothing focussed
  perf tools: Remove duplicate const qualifier
  perf script: Use readdir() instead of deprecated readdir_r()
  perf thread_map: Use readdir() instead of deprecated readdir_r()
  perf tools: Use readdir() instead of deprecated readdir_r()
  perf bench numa: Avoid possible truncation when using snprintf()
  perf tests: Avoid possible truncation with dirent->d_name + snprintf
  perf scripting perl: Fix compile error with some perl5 versions
  perf thread_map: Correctly size buffer used with dirent->dt_name
  perf intel-pt: Use __fallthrough
  perf top: Use __fallthrough
  tools strfilter: Use __fallthrough
  tools string: Use __fallthrough in perf_atoll()
  tools include: Add a __fallthrough statement
  mqueue: fix a use-after-free in sys_mq_notify()
  RDMA/uverbs: Check port number supplied by user verbs cmds
  KEYS: Fix an error code in request_master_key()
  ath10k: override CE5 config for QCA9377
  x86/uaccess: Optimize copy_user_enhanced_fast_string() for short strings
  x86/tools: Fix gcc-7 warning in relocs.c
  gfs2: Fix glock rhashtable rcu bug
  USB: serial: qcserial: new Sierra Wireless EM7305 device ID
  USB: serial: option: add two Longcheer device ids
  pinctrl: sh-pfc: Update info pointer after SoC-specific init
  pinctrl: mxs: atomically switch mux and drive strength config
  pinctrl: sunxi: Fix SPDIF function name for A83T
  pinctrl: meson: meson8b: fix the NAND DQS pins
  pinctrl: sh-pfc: r8a7791: Fix SCIF2 pinmux data
  sysctl: report EINVAL if value is larger than UINT_MAX for proc_douintvec
  sysctl: don't print negative flag for proc_douintvec
  mac80211_hwsim: Replace bogus hrtimer clockid
  usb: Fix typo in the definition of Endpoint[out]Request
  usb: usbip: set buffer pointers to NULL after free
  Add USB quirk for HVR-950q to avoid intermittent device resets
  USB: serial: cp210x: add ID for CEL EM3588 USB ZigBee stick
  usb: dwc3: replace %p with %pK
  drm/virtio: don't leak bo on drm_gem_object_init failure
  tracing/kprobes: Allow to create probe with a module name starting with a digit
  mm: fix classzone_idx underflow in shrink_zones()
  bgmac: reset & enable Ethernet core before using it
  driver core: platform: fix race condition with driver_override
  fs: completely ignore unknown open flags
  fs: add a VALID_OPEN_FLAGS
  ANDROID: binder: add RT inheritance flag to node.
  ANDROID: binder: improve priority inheritance.
  ANDROID: binder: add min sched_policy to node.
  ANDROID: binder: add support for RT prio inheritance.
  ANDROID: binder: push new transactions to waiting threads.
  ANDROID: binder: remove proc waitqueue
  FROMLIST: binder: remove global binder lock
  FROMLIST: binder: fix death race conditions
  FROMLIST: binder: protect against stale pointers in print_binder_transaction
  FROMLIST: binder: protect binder_ref with outer lock
  FROMLIST: binder: use inner lock to protect thread accounting
  FROMLIST: binder: protect transaction_stack with inner lock.
  FROMLIST: binder: protect proc->threads with inner_lock
  FROMLIST: binder: protect proc->nodes with inner lock
  FROMLIST: binder: add spinlock to protect binder_node
  FROMLIST: binder: add spinlocks to protect todo lists
  FROMLIST: binder: use inner lock to sync work dq and node counts
  FROMLIST: binder: introduce locking helper functions
  FROMLIST: binder: use node->tmp_refs to ensure node safety
  FROMLIST: binder: refactor binder ref inc/dec for thread safety
  FROMLIST: binder: make sure accesses to proc/thread are safe
  FROMLIST: binder: make sure target_node has strong ref
  FROMLIST: binder: guarantee txn complete / errors delivered in-order
  FROMLIST: binder: refactor binder_pop_transaction
  FROMLIST: binder: use atomic for transaction_log index
  FROMLIST: binder: add more debug info when allocation fails.
  FROMLIST: binder: protect against two threads freeing buffer
  FROMLIST: binder: remove dead code in binder_get_ref_for_node
  FROMLIST: binder: don't modify thread->looper from other threads
  FROMLIST: binder: avoid race conditions when enqueuing txn
  FROMLIST: binder: refactor queue management in binder_thread_read
  FROMLIST: binder: add log information for binder transaction failures
  FROMLIST: binder: make binder_last_id an atomic
  FROMLIST: binder: change binder_stats to atomics
  FROMLIST: binder: add protection for non-perf cases
  FROMLIST: binder: remove binder_debug_no_lock mechanism
  FROMLIST: binder: move binder_alloc to separate file
  FROMLIST: binder: separate out binder_alloc functions
  FROMLIST: binder: remove unneeded cleanup code
  FROMLIST: binder: separate binder allocator structure from binder proc
  FROMLIST: binder: Use wake up hint for synchronous transactions.
  Revert "android: binder: move global binder state into context struct."
  sched: walt: fix window misalignment when HZ=300
  ANDROID: android-base.cfg: remove CONFIG_CGROUP_DEBUG
  ANDROID: sdcardfs: use mount_nodev and fix a issue in sdcardfs_kill_sb

Conflicts:
	drivers/android/binder.c
	drivers/net/wireless/ath/ath10k/pci.c

Change-Id: Ic6f82c2ec9929733a16a03bb3b745187e002f4f6
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit b65ef47237
Blagovest Kolenichev <bkolenichev@codeaurora.org>, 2017-07-26 08:12:21 -07:00
144 changed files with 4958 additions and 2075 deletions


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 76
+SUBLEVEL = 78
 EXTRAVERSION =
 NAME = Blurry Fish Butt


@@ -17,7 +17,6 @@ CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y


@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE	0x400000UL
 /* When the program starts, a1 contains a pointer to a function to be
    registered with atexit, as per the SVR4 ABI. A value of 0 means we


@@ -114,12 +114,11 @@
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 /*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE	0x100000000UL
 #ifndef __ASSEMBLY__
@@ -170,7 +169,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #ifdef CONFIG_COMPAT
-#define COMPAT_ELF_ET_DYN_BASE	(2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE	0x000400000UL
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG	18


@@ -39,6 +39,8 @@ struct hppa_dma_ops {
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern struct hppa_dma_ops pcxl_dma_ops;
 extern struct hppa_dma_ops pcx_dma_ops;
@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;

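Note: with GET_IOC() now returning NULL for a device without an IOMMU, callers are expected to fail the DMA operation instead of hitting the old BUG_ON(). A minimal sketch of that caller-side pattern (example_map_single is a hypothetical name, not code from this series):

	/* Illustrative only: handle the NULL that GET_IOC() can now return. */
	static dma_addr_t example_map_single(struct device *dev, void *addr,
					     size_t size)
	{
		struct ioc *ioc = GET_IOC(dev);

		if (!ioc) {
			WARN_ON_ONCE(1);
			return DMA_ERROR_CODE;	/* dma op on a non-dma device */
		}
		/* ... set up the IOMMU mapping via ioc ... */
		return 0;
	}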

@@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context)
 	mtctl(__space_to_prot(context), 8);
 }
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
 {
 	if (prev != next) {
 		mtctl(__pa(next->pgd), 25);
 		load_context(next->context);
 	}
 }
+
+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
 #define deactivate_mm(tsk,mm)	do { } while (0)
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)


@@ -361,7 +361,7 @@
 	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)	/* 265 */
-	ENTRY_SAME(keyctl)
+	ENTRY_COMP(keyctl)
 	ENTRY_SAME(ioprio_set)
 	ENTRY_SAME(ioprio_get)
 	ENTRY_SAME(inotify_init)


@@ -298,7 +298,7 @@ bad_area:
 	case 15:	/* Data TLB miss fault/Data page fault */
 		/* send SIGSEGV when outside of vma */
 		if (!vma ||
-		    address < vma->vm_start || address > vma->vm_end) {
+		    address < vma->vm_start || address >= vma->vm_end) {
 			si.si_signo = SIGSEGV;
 			si.si_code = SEGV_MAPERR;
 			break;


@@ -23,12 +23,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE	0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE	(is_32bit_task() ? 0x000400000UL : \
+				   0x100000000UL)
 #define ELF_CORE_EFLAGS	(is_elf2_task() ? 2 : 0)


@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. 64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
-				(STACK_TOP / 3 * 2) : \
-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE	(is_compat_task() ? 0x000400000UL : \
+				   0x100000000UL)
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */


@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 static bool avx2_usable(void)
 {
-	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 	    && boot_cpu_has(X86_FEATURE_BMI1)
 	    && boot_cpu_has(X86_FEATURE_BMI2))
 		return true;


@@ -245,12 +245,13 @@ extern int force_personality32;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE	(mmap_is_ia32() ? 0x000400000UL : \
+				   0x100000000UL)
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. This could be done in user space,

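Note: the ELF_ET_DYN_BASE changes above pair with "binfmt_elf: use ELF_ET_DYN_BASE only for PIE" from the same series. A simplified sketch of that loader logic (condensed from memory of fs/binfmt_elf.c, not the verbatim hunk):

	/* Simplified: ELF_ET_DYN_BASE is used only when the ET_DYN binary
	 * has an interpreter (i.e. a PIE); a dynamic loader executed
	 * directly is mapped like any other mmap allocation instead.
	 */
	if (elf_ex->e_type == ET_DYN) {
		if (interpreter) {
			load_bias = ELF_ET_DYN_BASE;
			if (current->flags & PF_RANDOMIZE)
				load_bias += arch_mmap_rnd();
			elf_flags |= MAP_FIXED;
		} else {
			load_bias = 0;
		}
	}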

@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST	0x0000003b
 #define MSR_IA32_BNDCFGS	0x00000d90
+#define MSR_IA32_BNDCFGS_RSVD	0x00000ffc
+
 #define MSR_IA32_XSS		0x00000da0
 #define FEATURE_CONTROL_LOCKED	(1<<0)


@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);


@@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p)
 	if (mtrr_trim_uncached_memory(max_pfn))
 		max_pfn = e820_end_of_ram_pfn();
+	/*
+	 * This call is required when the CPU does not support PAT. If
+	 * mtrr_bp_init() invoked it already via pat_init() the call has no
+	 * effect.
+	 */
+	init_cache_modes();
+
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
 	find_low_pfn_range();


@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 	return ret;
 }
+bool kvm_mpx_supported(void)
+{
+	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+		 && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
 u64 kvm_supported_xcr0(void)
 {
 	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
-	if (!kvm_x86_ops->mpx_supported())
+	if (!kvm_mpx_supported())
 		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
 	return xcr0;
@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
-	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+	vcpu->arch.eager_fpu = use_eager_fpu();
 	if (vcpu->arch.eager_fpu)
 		kvm_x86_ops->fpu_activate(vcpu);
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #endif
 	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 	/* cpuid 1.edx */


@@ -4,6 +4,7 @@
 #include "x86.h"
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -134,14 +135,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
 static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -150,6 +143,14 @@ static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
 }
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;


@@ -863,7 +863,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@@ -2541,7 +2540,7 @@
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 	/* We support free control of debug control saving. */
@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		VM_ENTRY_LOAD_IA32_PAT;
 	vmx->nested.nested_vmx_entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 	/* We support free control of debug control loading. */
@@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
@@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void)
 	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
 		switch (shadow_read_write_fields[i]) {
 		case GUEST_BNDCFGS:
-			if (!vmx_mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		default:
@@ -6253,7 +6257,6 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 	if (nested_cpu_has_xsaves(vmcs12))
 		vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);


@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled)
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_copy_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-17:	movl %edx,%ecx
+.L_copy_short_string:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
 	jz 20f
@@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	ASM_STAC
+	cmpl $64,%edx
+	jb .L_copy_short_string	/* less then 64 bytes, avoid the costly 'rep' */
 	movl %edx,%ecx
 1:	rep
 	movsb


@@ -36,14 +36,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 void pat_disable(const char *reason)
 {
-	if (!__pat_enabled)
+	if (pat_disabled)
 		return;
 	if (boot_cpu_done) {
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
 		return;
 	}
-	__pat_enabled = 0;
+	pat_disabled = true;
 	pr_info("x86/PAT: %s\n", reason);
-	init_cache_modes();
 }
 static int __init nopat(char *str)
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
 		update_cache_mode_entry(i, cache);
 	}
 	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+	init_cm_done = true;
 }
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
 	}
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	pat_initialized = true;
 	__init_cache_modes(pat);
 }
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
 	u64 pat = 0;
-	static int init_cm_done;
 	if (init_cm_done)
 		return;
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
 	}
 	__init_cache_modes(pat);
-	init_cm_done = 1;
 }
 /**
@@ -305,10 +303,8 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
-	if (!pat_enabled()) {
-		init_cache_modes();
+	if (pat_disabled)
 		return;
-	}
 	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
 	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
View file

@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
 		die("Segment relocations found but --realmode not specified\n");
 	/* Order the relocations for more efficient processing */
-	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#else
+	sort_relocs(&relocs16);
 #endif
 	/* Print the relocations */


@@ -1,3 +1,3 @@
 ccflags-y += -I$(src)	# needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o

[file diff suppressed because it is too large]


@@ -0,0 +1,802 @@
/* binder_alloc.c
*
* Android IPC Subsystem
*
* Copyright (C) 2007-2017 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, S_IWUSR | S_IRUGO);
#define binder_alloc_debug(mask, x...) \
do { \
if (binder_alloc_debug_mask & mask) \
pr_info(x); \
} while (0)
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
return alloc->buffer +
alloc->buffer_size - (void *)buffer->data;
return (size_t)list_entry(buffer->entry.next,
struct binder_buffer, entry) - (size_t)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *new_buffer)
{
struct rb_node **p = &alloc->free_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
size_t buffer_size;
size_t new_buffer_size;
BUG_ON(!new_buffer->free);
new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: add free buffer, size %zd, at %pK\n",
alloc->pid, new_buffer_size, new_buffer);
while (*p) {
parent = *p;
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
if (new_buffer_size < buffer_size)
p = &parent->rb_left;
else
p = &parent->rb_right;
}
rb_link_node(&new_buffer->rb_node, parent, p);
rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
static void binder_insert_allocated_buffer_locked(
struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
struct rb_node **p = &alloc->allocated_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
BUG_ON(new_buffer->free);
while (*p) {
parent = *p;
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
if (new_buffer < buffer)
p = &parent->rb_left;
else if (new_buffer > buffer)
p = &parent->rb_right;
else
BUG();
}
rb_link_node(&new_buffer->rb_node, parent, p);
rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_alloc *alloc,
uintptr_t user_ptr)
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
if (kern_ptr < buffer)
n = n->rb_left;
else if (kern_ptr > buffer)
n = n->rb_right;
else {
/*
* Guard against user threads attempting to
* free the buffer twice
*/
if (buffer->free_in_progress) {
pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
alloc->pid, current->pid, (u64)user_ptr);
return NULL;
}
buffer->free_in_progress = 1;
return buffer;
}
}
return NULL;
}
/**
* binder_alloc_prepare_to_free() - get buffer given user ptr
* @alloc: binder_alloc for this proc
* @user_ptr: User pointer to buffer data
*
* Validate userspace pointer to buffer data and return buffer corresponding to
* that user pointer. Search the rb tree for buffer that matches user data
* pointer.
*
* Return: Pointer to buffer or NULL
*/
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
uintptr_t user_ptr)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
mutex_unlock(&alloc->mutex);
return buffer;
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct page **page;
struct mm_struct *mm;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
allocate ? "allocate" : "free", start, end);
if (end <= start)
return 0;
trace_binder_update_page_range(alloc, allocate, start, end);
if (vma)
mm = NULL;
else
mm = get_task_mm(alloc->tsk);
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
if (vma && mm != alloc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
alloc->pid);
vma = NULL;
}
}
if (allocate == 0)
goto free_range;
if (vma == NULL) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
}
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
BUG_ON(*page);
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
ret = map_kernel_range_noflush((unsigned long)page_addr,
PAGE_SIZE, PAGE_KERNEL, page);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
alloc->pid, page_addr);
goto err_map_kernel_failed;
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return 0;
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
alloc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
__free_page(*page);
*page = NULL;
err_alloc_page_failed:
;
}
err_no_vma:
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
void *has_page_addr;
void *end_page_addr;
size_t size, data_offsets_size;
int ret;
if (alloc->vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: got transaction with invalid size %zd-%zd\n",
alloc->pid, data_size, offsets_size);
return ERR_PTR(-EINVAL);
}
size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
if (size < data_offsets_size || size < extra_buffers_size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: got transaction with invalid extra_buffers_size %zd\n",
alloc->pid, extra_buffers_size);
return ERR_PTR(-EINVAL);
}
if (is_async &&
alloc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
return ERR_PTR(-ENOSPC);
}
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
if (size < buffer_size) {
best_fit = n;
n = n->rb_left;
} else if (size > buffer_size)
n = n->rb_right;
else {
best_fit = n;
break;
}
}
if (best_fit == NULL) {
size_t allocated_buffers = 0;
size_t largest_alloc_size = 0;
size_t total_alloc_size = 0;
size_t free_buffers = 0;
size_t largest_free_size = 0;
size_t total_free_size = 0;
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
allocated_buffers++;
total_alloc_size += buffer_size;
if (buffer_size > largest_alloc_size)
largest_alloc_size = buffer_size;
}
for (n = rb_first(&alloc->free_buffers); n != NULL;
n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
free_buffers++;
total_free_size += buffer_size;
if (buffer_size > largest_free_size)
largest_free_size = buffer_size;
}
pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
alloc->pid, size);
pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
total_alloc_size, allocated_buffers, largest_alloc_size,
total_free_size, free_buffers, largest_free_size);
return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
}
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size);
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
if (n == NULL) {
if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
buffer_size = size; /* no room for other buffers */
else
buffer_size = size + sizeof(struct binder_buffer);
}
end_page_addr =
(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
if (ret)
return ERR_PTR(ret);
rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
buffer->free_in_progress = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
struct binder_buffer *new_buffer = (void *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
}
return buffer;
}
/**
* binder_alloc_new_buf() - Allocate a new binder buffer
* @alloc: binder_alloc for this proc
* @data_size: size of user data buffer
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
* is the sum of the three given sizes (each rounded up to
* pointer-sized boundary)
*
* Return: The allocated buffer or an ERR_PTR(-errno) on failure
*/
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
extra_buffers_size, is_async);
mutex_unlock(&alloc->mutex);
return buffer;
}
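/*
 * Illustrative sketch (not part of this file): roughly the caller-side
 * pattern in binder_transaction(), which must treat the return value as
 * an ERR_PTR rather than NULL:
 *
 *	buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *				      tr->offsets_size, extra_buffers_size,
 *				      !!(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(buffer)) {
 *		return_error = BR_FAILED_REPLY;
 *		goto err_binder_alloc_buf_failed;
 *	}
 */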
static void *buffer_start_page(struct binder_buffer *buffer)
{
return (void *)((uintptr_t)buffer & PAGE_MASK);
}
static void *buffer_end_page(struct binder_buffer *buffer)
{
return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
int free_page_end = 1;
int free_page_start = 1;
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
BUG_ON(!prev->free);
if (buffer_end_page(prev) == buffer_start_page(buffer)) {
free_page_start = 0;
if (buffer_end_page(prev) == buffer_end_page(buffer))
free_page_end = 0;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid, buffer, prev);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (buffer_start_page(next) == buffer_end_page(buffer)) {
free_page_end = 0;
if (buffer_start_page(next) ==
buffer_start_page(buffer))
free_page_start = 0;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid, buffer, prev);
}
}
list_del(&buffer->entry);
if (free_page_start || free_page_end) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
alloc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(alloc, 0, free_page_start ?
buffer_start_page(buffer) : buffer_end_page(buffer),
(free_page_end ? buffer_end_page(buffer) :
buffer_start_page(buffer)) + PAGE_SIZE, NULL);
}
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
size_t size, buffer_size;
buffer_size = binder_alloc_buffer_size(alloc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
ALIGN(buffer->offsets_size, sizeof(void *)) +
ALIGN(buffer->extra_buffers_size, sizeof(void *));
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %pK size %zd buffer_size %zd\n",
alloc->pid, buffer, size, buffer_size);
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
BUG_ON((void *)buffer < alloc->buffer);
BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
}
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
binder_delete_free_buffer(alloc, next);
}
}
if (alloc->buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
rb_erase(&prev->rb_node, &alloc->free_buffers);
buffer = prev;
}
}
binder_insert_free_buffer(alloc, buffer);
}
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
* @buffer: kernel pointer to buffer
*
* Free the buffer allocated via binder_alloc_new_buffer()
*/
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
}
/**
* binder_alloc_mmap_handler() - map virtual address space for proc
* @alloc: alloc structure for this proc
* @vma: vma passed to mmap()
*
* Called by binder_mmap() to initialize the space specified in
* vma for allocating binder buffers
*
* Return:
* 0 = success
* -EBUSY = address space already mapped
* -ENOMEM = failed to map memory to given address space
*/
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
const char *failure_string;
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
if (alloc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
alloc->buffer = area->addr;
alloc->user_buffer_offset =
vma->vm_start - (uintptr_t)alloc->buffer;
mutex_unlock(&binder_alloc_mmap_lock);
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
while (CACHE_COLOUR(
(vma->vm_start ^ (uint32_t)alloc->buffer))) {
pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
alloc->pid, vma->vm_start, vma->vm_end,
alloc->buffer);
vma->vm_start += PAGE_SIZE;
}
}
#endif
alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
((vma->vm_end - vma->vm_start) / PAGE_SIZE),
GFP_KERNEL);
if (alloc->pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
if (binder_update_page_range(alloc, 1, alloc->buffer,
alloc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
buffer = alloc->buffer;
INIT_LIST_HEAD(&alloc->buffers);
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
return 0;
err_alloc_small_buf_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_alloc_mmap_lock);
vfree(alloc->buffer);
alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
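/*
 * Illustrative sketch (not part of this file): binder_mmap() is the
 * expected caller; it forwards its vma here and propagates the error,
 * e.g. -EBUSY when the address space was already mapped:
 *
 *	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 *	if (ret)
 *		return ret;
 */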
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
struct binder_buffer *buffer;
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
BUG_ON(buffer->transaction);
binder_free_buf_locked(alloc, buffer);
buffers++;
}
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
if (!alloc->pages[i])
continue;
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d at %pK not freed\n",
__func__, alloc->pid, i, page_addr);
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
__free_page(alloc->pages[i]);
page_count++;
}
kfree(alloc->pages);
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
__func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
/**
* binder_alloc_print_allocated() - print buffer info
* @m: seq_file for output via seq_printf()
* @alloc: binder_alloc for this proc
*
* Prints information about every buffer associated with
* the binder_alloc state to the given seq_file
*/
void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc)
{
struct rb_node *n;
mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
print_binder_buffer(m, " buffer",
rb_entry(n, struct binder_buffer, rb_node));
mutex_unlock(&alloc->mutex);
}
/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
* Return: count of allocated buffers
*/
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
struct rb_node *n;
int count = 0;
mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
mutex_unlock(&alloc->mutex);
return count;
}
/**
* binder_alloc_vma_close() - invalidate address space
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
* Clears alloc->vma to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
* Called from binder_open() to initialize binder_alloc fields for
* new binder proc
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
}


@@ -0,0 +1,163 @@
/*
* Copyright (C) 2017 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
struct binder_transaction;
/**
* struct binder_buffer - buffer used for binder transactions
* @entry: entry alloc->buffers
* @rb_node: node for allocated_buffers/free_buffers rb trees
* @free: true if buffer is free
* @allow_user_free: %true if the user is allowed to free the buffer
* @async_transaction: %true if the buffer is in use for an async transaction
* @debug_id: unique ID for debugging
* @transaction: pointer to the associated struct binder_transaction
* @target_node: struct binder_node associated with this buffer
* @data_size: size of @transaction data
* @offsets_size: size of the array of object offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
* @data: pointer to the base of the buffer space
*
* Bookkeeping structure for binder transaction buffers
*/
struct binder_buffer {
struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
unsigned free_in_progress:1;
unsigned debug_id:28;
struct binder_transaction *transaction;
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
uint8_t data[0];
};
/**
* struct binder_alloc - per-binder proc state for binder allocator
* @vma: vm_area_struct passed to mmap_handler
* (invariant after mmap)
* @tsk: tid for task that called init for this proc
* (invariant after init)
* @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
* @buffer: base of per-proc address space mapped via mmap
* @user_buffer_offset: offset between user and kernel VAs for buffer
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
* @pages: array of physical page addresses for each
* page of mmap'd space
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
* Bookkeeping structure for per-proc address space management for binder
* buffers. It is normally initialized during binder_init() and binder_mmap()
* calls. The address space is used for both user-visible buffers and for
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
struct mutex mutex;
struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
uintptr_t user_ptr);
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer);
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma);
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
* @alloc: binder_alloc for this proc
*
* Return: the bytes remaining in the address-space for async transactions
*/
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
mutex_unlock(&alloc->mutex);
return free_async_space;
}
/**
* binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
* @alloc: binder_alloc for this proc
*
* Return: the offset between kernel and user-space addresses to use for
* virtual address conversion
*/
static inline ptrdiff_t
binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
{
/*
* user_buffer_offset is constant if vma is set and
* undefined if vma is not set. It is possible to
* get here with !alloc->vma if the target process
* is dying while a transaction is being initiated.
* Returning the old value is ok in this case and
* the transaction will fail.
*/
return alloc->user_buffer_offset;
}
#endif /* _LINUX_BINDER_ALLOC_H */
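Usage note: the offset above is how binder converts a buffer's kernel address into the matching userspace address. A hedged sketch (hypothetical helper, not part of the patch):

	/* Hypothetical helper (not in the patch): kernel -> user address. */
	static inline uintptr_t binder_buffer_user_ptr(struct binder_alloc *alloc,
						       struct binder_buffer *buffer)
	{
		return (uintptr_t)buffer->data +
			binder_alloc_get_user_buffer_offset(alloc);
	}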


@@ -23,7 +23,8 @@
 struct binder_buffer;
 struct binder_node;
 struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
 struct binder_thread;
 struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
 TRACE_EVENT(binder_transaction_node_to_ref,
 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
-		 struct binder_ref *ref),
-	TP_ARGS(t, node, ref),
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 		__entry->debug_id = t->debug_id;
 		__entry->node_debug_id = node->debug_id;
 		__entry->node_ptr = node->ptr;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
 	),
 	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
 		  __entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 );
 TRACE_EVENT(binder_transaction_ref_to_node,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
-	TP_ARGS(t, ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
-		__entry->node_debug_id = ref->node->debug_id;
-		__entry->node_ptr = ref->node->ptr;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
+		__entry->node_debug_id = node->debug_id;
+		__entry->node_ptr = node->ptr;
 	),
 	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
 		  __entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 );
 TRACE_EVENT(binder_transaction_ref_to_ref,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
-		 struct binder_ref *dest_ref),
-	TP_ARGS(t, src_ref, dest_ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *src_ref,
+		 struct binder_ref_data *dest_ref),
+	TP_ARGS(t, node, src_ref, dest_ref),
 	TP_STRUCT__entry(
 		__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->node_debug_id = src_ref->node->debug_id;
+		__entry->node_debug_id = node->debug_id;
 		__entry->src_ref_debug_id = src_ref->debug_id;
 		__entry->src_ref_desc = src_ref->desc;
 		__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
 	     TP_ARGS(buffer));
 TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
+	TP_PROTO(struct binder_alloc *alloc, bool allocate,
 		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
+	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
 		__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
 		__field(size_t, size)
 	),
 	TP_fast_assign(
-		__entry->proc = proc->pid;
+		__entry->proc = alloc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
+		__entry->offset = start - alloc->buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",


@ -2105,7 +2105,11 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
if (dev->class && dev->class->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->class->shutdown(dev);
} else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
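The hunk above gives a class-level shutdown hook precedence over the bus-level one. Condensed sketch of the resulting dispatch order (the final driver fallback lies outside the quoted context and is assumed unchanged from the existing code):

    /* Per-device shutdown dispatch after this change (sketch, not patch text):
     * class hook first, then bus hook, then the bare driver hook.
     */
    if (dev->class && dev->class->shutdown)
            dev->class->shutdown(dev);
    else if (dev->bus && dev->bus->shutdown)
            dev->bus->shutdown(dev);
    else if (dev->driver && dev->driver->shutdown)
            dev->driver->shutdown(dev);

This is the hook the TPM changes further down attach to via chip->dev.class->shutdown = tpm_class_shutdown;.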


@ -827,7 +827,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old = pdev->driver_override, *cp;
char *driver_override, *old, *cp;
if (count > PATH_MAX)
return -EINVAL;
@ -840,12 +840,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
device_lock(dev);
old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
device_unlock(dev);
kfree(old);
@ -856,8 +859,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
ssize_t len;
return sprintf(buf, "%s\n", pdev->driver_override);
device_lock(dev);
len = sprintf(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
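The store/show changes above close a race on pdev->driver_override. The pattern being applied, reusing the names from the hunk (sketch, not the full function):

    /* Swap the pointer under the device lock; free the old string only
     * after dropping it. A reader that takes the lock sees either the
     * old or the new value, never freed memory.
     */
    device_lock(dev);
    old = pdev->driver_override;
    pdev->driver_override = driver_override;    /* or NULL for empty input */
    device_unlock(dev);
    kfree(old);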


@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;


@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources);
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
DEFINE_STATIC_SRCU(wakeup_srcu);
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_rcu();
synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@ -331,13 +333,14 @@ void device_wakeup_detach_irq(struct device *dev)
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_arm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@ -348,13 +351,14 @@ void device_wakeup_arm_wake_irqs(void)
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_disarm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@ -839,10 +843,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int active = 0;
int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@ -858,7 +862,7 @@ void pm_print_active_wakeup_sources(void)
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
@ -985,8 +989,9 @@ void pm_wakep_autosleep_enabled(bool set)
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
int srcuidx;
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@ -1000,7 +1005,7 @@ void pm_wakep_autosleep_enabled(bool set)
}
spin_unlock_irq(&ws->lock);
}
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m,
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
int srcuidx;
seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
rcu_read_lock();
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
rcu_read_unlock();
srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);
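Every hunk in this file is the same mechanical conversion. A minimal sketch of the reader pattern (the walker helper is illustrative; wakeup_srcu, the list and the APIs all appear in this patch):

    /* SRCU reader: srcu_read_lock() returns an index that must be handed
     * back to srcu_read_unlock(). Unlike plain RCU read sections these may
     * sleep, which is why writers switch to synchronize_srcu() as in
     * wakeup_source_remove() above.
     */
    static void walk_wakeup_sources(void)        /* illustrative helper */
    {
            struct wakeup_source *ws;
            int srcuidx;

            srcuidx = srcu_read_lock(&wakeup_srcu);
            list_for_each_entry_rcu(ws, &wakeup_sources, entry)
                    pr_info("wakeup source: %s\n", ws->name);
            srcu_read_unlock(&wakeup_srcu, srcuidx);
    }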


@ -36,10 +36,60 @@ static DEFINE_SPINLOCK(driver_lock);
struct class *tpm_class;
dev_t tpm_devt;
/*
* tpm_chip_find_get - return tpm_chip for a given chip number
* @chip_num the device number for the chip
/**
* tpm_try_get_ops() - Get a ref to the tpm_chip
* @chip: Chip to ref
*
* The caller must already have some kind of locking to ensure that chip is
* valid. This function will lock the chip so that the ops member can be
* accessed safely. The locking prevents tpm_chip_unregister from
* completing, so it should not be held for long periods.
*
* Returns -ERRNO if the chip could not be got.
*/
int tpm_try_get_ops(struct tpm_chip *chip)
{
int rc = -EIO;
get_device(&chip->dev);
down_read(&chip->ops_sem);
if (!chip->ops)
goto out_lock;
if (!try_module_get(chip->dev.parent->driver->owner))
goto out_lock;
return 0;
out_lock:
up_read(&chip->ops_sem);
put_device(&chip->dev);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_try_get_ops);
/**
* tpm_put_ops() - Release a ref to the tpm_chip
* @chip: Chip to put
*
* This is the opposite pair to tpm_try_get_ops(). After this returns chip may
* be kfree'd.
*/
void tpm_put_ops(struct tpm_chip *chip)
{
module_put(chip->dev.parent->driver->owner);
up_read(&chip->ops_sem);
put_device(&chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
* tpm_chip_find_get() - return tpm_chip for a given chip number
* @chip_num: id to find
*
* The return'd chip has been tpm_try_get_ops'd and must be released via
* tpm_put_ops
*/
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
@ -49,10 +99,10 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->pdev->driver->owner)) {
/* rcu prevents chip from being free'd */
if (!tpm_try_get_ops(pos))
chip = pos;
break;
}
break;
}
rcu_read_unlock();
return chip;
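The kdoc above defines a get/put contract around chip->ops. A minimal caller sketch (my_tpm_op() is hypothetical; tpm_try_get_ops(), tpm_put_ops() and the tpm_transmit() signature are taken from this series):

    /* Hypothetical caller bracketing ops access per the contract above. */
    static ssize_t my_tpm_op(struct tpm_chip *chip, u8 *buf, size_t bufsiz)
    {
            ssize_t rc;

            if (tpm_try_get_ops(chip))   /* fails once unregister has begun */
                    return -EPIPE;

            rc = tpm_transmit(chip, buf, bufsiz, 0);

            tpm_put_ops(chip);           /* chip may be freed after this */
            return rc;
    }

The tpm-dev.c hunk below wraps tpm_transmit() in exactly this shape.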
@ -74,6 +124,41 @@ static void tpm_dev_release(struct device *dev)
kfree(chip);
}
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
*
* Issues a TPM2_Shutdown command prior to loss of power, as required by the
* TPM 2.0 spec.
* Then, calls bus- and device- specific shutdown code.
*
* XXX: This codepath relies on the fact that sysfs is not enabled for
* TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
* has sysfs support enabled before TPM sysfs's implicit locking is fixed.
*/
static int tpm_class_shutdown(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
down_write(&chip->ops_sem);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
chip->ops = NULL;
up_write(&chip->ops_sem);
}
/* Allow bus- and device-specific code to run. Note: since chip->ops
* is NULL, more-specific shutdown code will not be able to issue TPM
* commands.
*/
if (dev->bus && dev->bus->shutdown)
dev->bus->shutdown(dev);
else if (dev->driver && dev->driver->shutdown)
dev->driver->shutdown(dev);
return 0;
}
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @dev: device to which the chip is associated
@ -94,6 +179,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
init_rwsem(&chip->ops_sem);
INIT_LIST_HEAD(&chip->list);
chip->ops = ops;
@ -112,13 +198,12 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
chip->pdev = dev;
dev_set_drvdata(dev, chip);
chip->dev.class = tpm_class;
chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = chip->pdev;
chip->dev.parent = dev;
#ifdef CONFIG_ACPI
chip->dev.groups = chip->groups;
#endif
@ -133,7 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
device_initialize(&chip->dev);
cdev_init(&chip->cdev, &tpm_fops);
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.owner = dev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
@ -173,6 +258,12 @@ static int tpm_add_char_device(struct tpm_chip *chip)
static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
/* Make the driver uncallable. */
down_write(&chip->ops_sem);
chip->ops = NULL;
up_write(&chip->ops_sem);
device_del(&chip->dev);
}
@ -236,9 +327,8 @@ int tpm_chip_register(struct tpm_chip *chip)
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
&chip->dev.kobj,
"ppi");
rc = __compat_only_sysfs_link_entry_to_kobj(
&chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
if (rc && rc != -ENOENT) {
tpm_chip_unregister(chip);
return rc;
@ -259,6 +349,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
* Once this function returns the driver call backs in 'op's will not be
* running and will no longer start.
*
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
@ -273,7 +366,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
synchronize_rcu();
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
sysfs_remove_link(&chip->pdev->kobj, "ppi");
sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
tpm1_chip_unregister(chip);
tpm_del_char_device(chip);


@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file)
* by the check of is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->pdev, "Another process owns this TPM\n");
dev_dbg(&chip->dev, "Another process owns this TPM\n");
return -EBUSY;
}
@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file)
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
get_device(chip->pdev);
return 0;
}
@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}
/* atomic tpm command send and result receive */
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
*/
if (tpm_try_get_ops(priv->chip)) {
mutex_unlock(&priv->buffer_mutex);
return -EPIPE;
}
out_size = tpm_transmit(priv->chip, priv->data_buffer,
sizeof(priv->data_buffer), 0);
tpm_put_ops(priv->chip);
if (out_size < 0) {
mutex_unlock(&priv->buffer_mutex);
return out_size;
@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file)
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
put_device(priv->chip->pdev);
kfree(priv);
return 0;
}


@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
dev_err(chip->pdev, "Operation Canceled\n");
dev_err(&chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
dev_err(chip->pdev, "Operation Timed out\n");
dev_err(&chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->pdev,
dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
if (!(flags & TPM_TRANSMIT_UNLOCKED))
@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
return err;
@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
dev_info(chip->pdev, "Issuing TPM_STARTUP");
dev_info(&chip->dev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
0, NULL);
}
if (rc) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
/* Report adjusted timeouts */
if (chip->vendor.timeout_adjusted) {
dev_info(chip->pdev,
dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@ -612,7 +612,7 @@ duration:
chip->vendor.duration[TPM_MEDIUM] *= 1000;
chip->vendor.duration[TPM_LONG] *= 1000;
chip->vendor.duration_adjusted = true;
dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num)
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
else
rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, hash);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip)
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
dev_info(
&chip->dev, HW_ERR
"TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev)
}
if (rc)
dev_err(chip->pdev,
dev_err(&chip->dev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
dev_warn(chip->pdev, "TPM savestate took %dms\n",
dev_warn(&chip->dev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
err = tpm2_get_random(chip, out, max);
tpm_chip_put(chip);
tpm_put_ops(chip);
return err;
}
@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
num_bytes -= recd;
} while (retries-- && total < max);
tpm_chip_put(chip);
tpm_put_ops(chip);
return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_seal_trusted(chip, payload, options);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);
@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_unseal_trusted(chip, payload, options);
tpm_chip_put(chip);
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);


@ -284,16 +284,28 @@ static const struct attribute_group tpm_dev_group = {
int tpm_sysfs_add_device(struct tpm_chip *chip)
{
int err;
err = sysfs_create_group(&chip->pdev->kobj,
/* XXX: If you wish to remove this restriction, you must first update
* tpm_sysfs to explicitly lock chip->ops.
*/
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return 0;
err = sysfs_create_group(&chip->dev.parent->kobj,
&tpm_dev_group);
if (err)
dev_err(chip->pdev,
dev_err(&chip->dev,
"failed to create sysfs attributes, %d\n", err);
return err;
}
void tpm_sysfs_del_device(struct tpm_chip *chip)
{
sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
/* The sysfs routines rely on an implicit tpm_try_get_ops, this
* function is called before ops is null'd and the sysfs core
* synchronizes this removal so that no callbacks are running or can
* run again
*/
sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group);
}


@ -171,11 +171,16 @@ enum tpm_chip_flags {
};
struct tpm_chip {
struct device *pdev; /* Device stuff */
struct device dev;
struct cdev cdev;
/* A driver callback under ops cannot be run unless ops_sem is held
* (sometimes implicitly, eg for the sysfs code). ops becomes null
* when the driver is unregistered, see tpm_try_get_ops.
*/
struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
unsigned int flags;
int dev_num; /* /dev/tpm# */
@ -201,11 +206,6 @@ struct tpm_chip {
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
static inline void tpm_chip_put(struct tpm_chip *chip)
{
module_put(chip->pdev->driver->owner);
}
static inline int tpm_read_index(int base, int index)
{
outb(index, base);
@ -517,6 +517,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
struct tpm_chip *tpm_chip_find_get(int chip_num);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
extern int tpm_chip_register(struct tpm_chip *chip);


@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
if (rc) {
dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
handle);
return;
}
@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
"flushing context");
if (rc)
dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
tpm_buf_destroy(&buf);
@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
* except print the error code on a system failure.
*/
if (rc < 0)
dev_warn(chip->pdev, "transmit returned %d while stopping the TPM",
dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
rc);
}
EXPORT_SYMBOL_GPL(tpm2_shutdown);
@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
* immediately. This is a workaround for that.
*/
if (rc == TPM2_RC_TESTING) {
dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
rc = 0;
}


@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (i = 0; i < 6; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading header\n");
dev_err(&chip->dev, "error reading header\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size = be32_to_cpu(*native_size);
if (count < size) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading data\n");
dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
}
@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
for (; i < size; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
dev_err(chip->pdev, "error reading data\n");
dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
status = ioread8(chip->vendor.iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
dev_err(chip->pdev, "data available is stuck\n");
dev_err(&chip->dev, "data available is stuck\n");
return -EIO;
}
@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
int i;
dev_dbg(chip->pdev, "tpm_atml_send:\n");
dev_dbg(&chip->dev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
iowrite8(buf[i], chip->vendor.iobase);
}


@ -52,7 +52,7 @@ struct priv_data {
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
priv->len = 0;
@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
status = i2c_master_send(client, buf, len);
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ENOMEM;
if (priv->len >= expected_len) {
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
rc = i2c_master_recv(client, buf, expected_len);
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
struct priv_data *priv = chip->vendor.priv;
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
dev_dbg(chip->pdev,
dev_dbg(&chip->dev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;


@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
dev_err(chip->pdev, "Unable to read header\n");
dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
dev_err(chip->pdev, "Unable to read remainder of result\n");
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->pdev, "Error left over data\n");
dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}


@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
dev_err(chip->pdev, "%s() error return %d\n", __func__,
dev_err(&chip->dev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
struct i2c_client *client = to_i2c_client(chip->pdev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
return 0;
} while (time_before(jiffies, stop));
}
dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
&chip->vendor.read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct device *dev = chip->pdev;
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
break;
}
i2c_nuvoton_ready(chip);
dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
return size;
}
@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct device *dev = chip->pdev;
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;


@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip)
static void tpm_wtx_abort(struct tpm_chip *chip)
{
dev_info(chip->pdev, "Aborting WTX\n");
dev_info(&chip->dev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@ -257,7 +257,7 @@ recv_begin:
}
if (buf[0] != TPM_VL_VER) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@ -272,7 +272,7 @@ recv_begin:
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
dev_err(chip->pdev, "Error handling on vendor layer!\n");
dev_err(&chip->dev, "Error handling on vendor layer!\n");
return -EIO;
}
@ -284,7 +284,7 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX) {
dev_info(chip->pdev, "WTX-package received\n");
dev_info(&chip->dev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@ -295,14 +295,14 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
dev_info(chip->pdev, "WTX-abort acknowledged\n");
dev_info(&chip->dev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
dev_err(chip->pdev, "ERROR-package received:\n");
dev_err(&chip->dev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
dev_err(chip->pdev,
dev_err(&chip->dev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
ret = empty_fifo(chip, 1);
if (ret) {
dev_err(chip->pdev, "Timeout while clearing FIFO\n");
dev_err(&chip->dev, "Timeout while clearing FIFO\n");
return -EIO;
}


@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
}
while (time_before(jiffies, stop));
dev_info(chip->pdev, "wait for ready failed\n");
dev_info(&chip->dev, "wait for ready failed\n");
return -EBUSY;
}
@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
dev_err(chip->pdev, "F0 timeout\n");
dev_err(&chip->dev, "F0 timeout\n");
return -EIO;
}
if ((data =
inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
dev_err(chip->pdev, "not in normal mode (0x%x)\n",
dev_err(&chip->dev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"OBF timeout (while reading data)\n");
return -EIO;
}
@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
dev_err(chip->pdev, "F0 not set\n");
dev_err(&chip->dev, "F0 not set\n");
return -EIO;
}
if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev, "IBF timeout\n");
dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
dev_err(chip->pdev, "IBR timeout\n");
dev_err(&chip->dev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev,
dev_err(&chip->dev,
"IBF timeout (while writing data)\n");
return -EIO;
}
@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
dev_err(chip->pdev, "IBF timeout\n");
dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);


@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
dev_err(chip->pdev, "Unable to read header\n");
dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
dev_err(chip->pdev, "Unable to read remainder of result\n");
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
&chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->pdev, "Error left over data\n");
dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
devm_free_irq(chip->pdev, chip->vendor.irq, chip);
devm_free_irq(&chip->dev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
msleep(1);
if (!priv->irq_tested) {
disable_interrupts(chip);
dev_err(chip->pdev,
dev_err(&chip->dev,
FW_BUG "TPM interrupt not working, polling instead\n");
}
priv->irq_tested = true;
@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip)
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
dev_info(chip->pdev, "Detected an iTPM.\n");
dev_info(&chip->dev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, i, tis_int_probe, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"Unable to request irq: %d for probe\n",
i);
continue;
@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
if (devm_request_irq
(dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->devname, chip) != 0) {
dev_info(chip->pdev,
dev_info(&chip->dev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;


@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req)
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
if (err1 == -EINPROGRESS || err1 == -EBUSY)
if (err1 == -EINPROGRESS ||
(err1 == -EBUSY && (ahash_request_flags(req) &
CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*


@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,


@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",


@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;


@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
if (ret != 0)
if (ret != 0) {
kfree(bo);
return ret;
}
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);


@ -2287,6 +2287,10 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
if (cmd.port_num < rdma_start_port(ib_dev) ||
cmd.port_num > rdma_end_port(ib_dev))
return -EINVAL;
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
@ -2827,6 +2831,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
if (cmd.attr.port_num < rdma_start_port(ib_dev) ||
cmd.attr.port_num > rdma_end_port(ib_dev))
return -EINVAL;
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;


@ -793,6 +793,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
int enabled;
u64 val;
if (cpu >= nr_cpu_ids)
return -EINVAL;
if (gic_irq_in_rdist(d))
return -EINVAL;


@ -645,6 +645,8 @@ static int add_as_linear_device(struct dm_target *ti, char *dev)
android_verity_target.iterate_devices = dm_linear_iterate_devices,
android_verity_target.io_hints = NULL;
set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0);
err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
if (!err) {


@ -1866,7 +1866,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
(mddev->layout != le64_to_cpu(sb->layout)) ||
(mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;


@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
{
u8 subaddr = 0x7, dmdregval;
u8 data[2];
int ret;
struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
.buf = &subaddr, .len = 1},
{.addr = 0x08,
.flags = I2C_M_RD,
.buf = &dmdregval, .len = 1}
};
struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
.buf = data, .len = 2} };
ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
if ((ret == 2) && (dmdregval & 0x2)) {
pr_debug("%s: DVB-T demod i2c gate was left closed\n",
dev->name);
data[0] = subaddr;
data[1] = (dmdregval & ~0x2);
if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
pr_err("%s: EEPROM i2c gate open failure\n",
dev->name);
}
}
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
if (dev->board == SAA7134_BOARD_MD7134)
saa7134_i2c_eeprom_md7134_gate(dev);
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {


@ -1583,6 +1583,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
/* This (reset &) enable is not preset in specs or reference driver but
* Broadcom does it in arch PCI code when enabling fake PCI device.
*/
bcma_core_enable(core, 0);
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)


@ -907,7 +907,7 @@ static void decode_txts(struct dp83640_private *dp83640,
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
skb_complete_tx_timestamp(skb, NULL);
kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;


@ -539,6 +539,8 @@ static int ksz9031_read_status(struct phy_device *phydev)
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
}
return 0;


@ -733,15 +733,15 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct slave_queue *queue = &vrf->queue;
struct list_head *head = &queue->all_slaves;
struct slave *slave, *next;
// struct slave_queue *queue = &vrf->queue;
// struct list_head *head = &queue->all_slaves;
// struct slave *slave, *next;
vrf_rtable_destroy(vrf);
vrf_rt6_destroy(vrf);
list_for_each_entry_safe(slave, next, head, list)
vrf_del_slave(dev, slave->dev);
// list_for_each_entry_safe(slave, next, head, list)
// vrf_del_slave(dev, slave->dev);
free_percpu(dev->dstats);
dev->dstats = NULL;
@ -914,6 +914,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
struct net_vrf *vrf = netdev_priv(dev);
struct slave_queue *queue = &vrf->queue;
struct list_head *all_slaves = &queue->all_slaves;
struct slave *slave, *next;
list_for_each_entry_safe(slave, next, all_slaves, list)
vrf_del_slave(dev, slave->dev);
unregister_netdevice_queue(dev, head);
}


@ -3139,7 +3139,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
if (QCA_REV_6174(ar))
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
ret = ath10k_pci_alloc_pipes(ar);


@ -4472,6 +4472,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) {
if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
brcmf_err("invalid action frame length\n");
err = -EINVAL;
goto exit;
}
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) {
brcmf_err("unable to allocate frame\n");


@ -2539,7 +2539,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
tasklet_hrtimer_init(&data->beacon_timer,
mac80211_hwsim_beacon,
CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);


@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
BUG_ON(size <= 0);
@ -805,6 +807,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@ -908,6 +914,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@ -980,6 +988,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);


@ -154,7 +154,10 @@ struct dino_device
};
/* Looks nice and keeps the compiler happy */
#define DINO_DEV(d) ((struct dino_device *) d)
#define DINO_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct dino_device *)__pdata; })
/*


@ -111,8 +111,10 @@ static u32 lba_t32;
/* Looks nice and keeps the compiler happy */
#define LBA_DEV(d) ((struct lba_device *) (d))
#define LBA_DEV(d) ({ \
void *__pdata = d; \
BUG_ON(!__pdata); \
(struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA


@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
return 0;
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/*
* check if mask is >= than the current max IO Virt Address
@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
int pide;
ioc = GET_IOC(dev);
if (!ioc)
return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
if (!ioc)
return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
if (!ioc) {
WARN_ON(!ioc);
return;
}
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;


@ -195,6 +195,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
return 0;
}
static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
{
u32 tmp;
tmp = readl(reg);
tmp &= ~(mask << shift);
tmp |= value << shift;
writel(tmp, reg);
}
static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
reg += bank * 0x20 + pin / 16 * 0x10;
shift = pin % 16 * 2;
writel(0x3 << shift, reg + CLR);
writel(g->muxsel[i] << shift, reg + SET);
mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
}
return 0;
@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
/* mA */
if (config & MA_PRESENT) {
shift = pin % 8 * 4;
writel(0x3 << shift, reg + CLR);
writel(ma << shift, reg + SET);
mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
}
/* vol */
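The new mxs_pinctrl_rmwl() helper replaces pairs of writes to the hardware SET/CLR register aliases with a single read-modify-write. The rationale is inferred here, not quoted from the patch: the old sequence left the field cleared between the two MMIO writes, so the pin briefly glitched through setting 0:

    /* Old sequence (hazard sketch): the two-bit field reads back as 0
     * between these two writes, momentarily selecting mux/drive setting 0.
     */
    writel(0x3 << shift, reg + CLR);     /* field -> 0b00 */
    writel(value << shift, reg + SET);   /* field -> value */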


@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = {
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
"nand_wen_clk", "nand_ren_clk", "nand_dqs0",
"nand_dqs1"
"nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
"nand_dqs_1"
};
static const char * const nor_groups[] = {


@ -543,6 +543,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
ret = info->ops->init(pfc);
if (ret < 0)
return ret;
/* .init() may have overridden pfc->info */
info = pfc->info;
}
/* Enable dummy states for those platforms without pinctrl support */


@ -1102,7 +1102,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),


@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out")),


@ -2911,6 +2911,7 @@ static int __init comedi_init(void)
dev = comedi_alloc_board_minor(NULL);
if (IS_ERR(dev)) {
comedi_cleanup_board_minors();
class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);


@ -529,6 +529,9 @@ static int vnt_start(struct ieee80211_hw *hw)
goto free_all;
}
if (vnt_key_init_table(priv))
goto free_all;
priv->int_interval = 1; /* bInterval is set to 1 */
vnt_int_start_interrupt(priv);


@ -2708,13 +2708,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@ -2723,7 +2723,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
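The switch from __put_user() to put_user() adds the pointer validation the raw variant skips. Conceptually (a sketch of the relationship, not the real uaccess implementation):

    /* put_user() ~ access_ok() + __put_user(): the raw form trusts the
     * pointer, so tioclinux() could previously be handed an unchecked,
     * possibly kernel-space, address.
     */
    #define put_user_sketch(x, p)                               \
            (access_ok(VERIFY_WRITE, (p), sizeof(*(p))) ?       \
                    __put_user((x), (p)) : -EFAULT)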


@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },


@ -224,7 +224,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");


@ -134,6 +134,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */


@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },


@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */


@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
kmem_cache_free(stub_priv_cache, priv);
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
kfree(urb->setup_packet);
urb->setup_packet = NULL;
usb_free_urb(urb);
}
}


@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
struct urb *urb = priv->urb;
kfree(urb->setup_packet);
urb->setup_packet = NULL;
kfree(urb->transfer_buffer);
urb->transfer_buffer = NULL;
list_del(&priv->list);
kmem_cache_free(stub_priv_cache, priv);
usb_free_urb(urb);


@ -905,17 +905,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
/*
* If we are loading ET_EXEC or we have already performed
* the ET_DYN load_addr calculations, proceed normally.
*/
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
load_bias = ELF_ET_DYN_BASE - vaddr;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
load_bias = ELF_PAGESTART(load_bias);
/*
* This logic is run once for the first LOAD Program
* Header for ET_DYN binaries to calculate the
* randomization (load_bias) for all the LOAD
* Program Headers, and to calculate the entire
* size of the ELF mapping (total_size). (Note that
* load_addr_set is set to true later once the
* initial mapping is performed.)
*
* There are effectively two types of ET_DYN
* binaries: programs (i.e. PIE: ET_DYN with INTERP)
* and loaders (ET_DYN without INTERP, since they
* _are_ the ELF interpreter). The loaders must
* be loaded away from programs since the program
* may otherwise collide with the loader (especially
* for ET_EXEC which does not have a randomized
* position). For example to handle invocations of
* "./ld.so someprog" to test out a new version of
* the loader, the subsequent program that the
* loader loads must avoid the loader itself, so
* they cannot share the same load range. Sufficient
* room for the brk must be allocated with the
* loader as well, since brk must be available with
* the loader.
*
* Therefore, programs are loaded offset from
* ELF_ET_DYN_BASE and loaders are loaded into the
* independently randomized mmap region (0 load_bias
* without MAP_FIXED).
*/
if (elf_interpreter) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
elf_flags |= MAP_FIXED;
} else
load_bias = 0;
/*
* Since load_bias is used for all subsequent loading
* calculations, we must lower it by the first vaddr
* so that the remaining calculations based on the
* ELF vaddrs will be correctly offset. The result
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
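Reading the comment above concretely (addresses illustrative; ELF_ET_DYN_BASE is per-architecture):

    /* Outcome sketch, not patch code:
     *   PIE program (ET_DYN + PT_INTERP): load_bias = ELF_ET_DYN_BASE,
     *       plus arch_mmap_rnd() when randomizing, mapped MAP_FIXED;
     *   ld.so invoked directly (ET_DYN, no PT_INTERP): load_bias = 0,
     *       so it lands in the mmap region, clear of any program it then
     *       loads and of that program's brk.
     */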


@ -206,8 +206,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
unsigned long ptr_size;
struct rlimit *rlim;
unsigned long ptr_size, limit;
/*
* Since the stack will hold pointers to the strings, we
@ -236,14 +235,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;
/*
* Limit to 1/4-th the stack size for the argv+env strings.
* Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
* (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
rlim = current->signal->rlim;
if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
limit = _STK_LIM / 4 * 3;
limit = min(limit, rlimit(RLIMIT_STACK) / 4);
if (size > limit)
goto fail;
}
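
Worked numbers for the new bound, as a sketch under the kernel's default _STK_LIM of 8 MiB (not code from the patch): with the common 8 MiB RLIMIT_STACK, rlimit/4 = 2 MiB wins; with an unlimited rlimit, the 6 MiB cap (3/4 of _STK_LIM) applies, so at least a quarter of the guaranteed stack always remains for the binfmt code.

#include <stdio.h>

#define STK_LIM (8UL << 20)                     /* kernel default: 8 MiB */

static unsigned long arg_limit(unsigned long rlim_stack)
{
        unsigned long limit = STK_LIM / 4 * 3;  /* 6 MiB hard cap */

        if (rlim_stack / 4 < limit)             /* min(cap, rlimit/4) */
                limit = rlim_stack / 4;
        return limit;
}

int main(void)
{
        printf("%lu MiB\n", arg_limit(8UL << 20) >> 20); /* 2: rlimit/4 wins */
        printf("%lu MiB\n", arg_limit(~0UL) >> 20);      /* 6: _STK_LIM cap  */
        return 0;
}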

@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
if (!ret || val >= clusters)
if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
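
The one-character fix hinges on kstrtoull()'s return convention: 0 on success, a negative errno on failure, so the old "!ret" test rejected every successfully parsed value. A minimal kernel-style sketch of the corrected pattern (illustrative helper, not from the patch):

#include <linux/kernel.h>
#include <linux/string.h>

static int parse_resv_clusters(const char *buf, unsigned long long max,
                               unsigned long long *out)
{
        unsigned long long val;
        int ret = kstrtoull(skip_spaces(buf), 0, &val);

        if (ret || val >= max)  /* parse error, or value out of range */
                return -EINVAL;
        *out = val;
        return 0;
}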

@@ -740,16 +740,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
__FMODE_EXEC | O_PATH | __O_TMPFILE |
__FMODE_NONOTIFY
));
BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
HWEIGHT32(
(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
__FMODE_EXEC | __FMODE_NONOTIFY));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
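
HWEIGHT32() is a population count evaluated at compile time, so the rewritten BUILD_BUG_ON still asserts that exactly 21 distinct bits are accounted for (O_RDONLY being 0 contributes none), but now derives them from VALID_OPEN_FLAGS so the two definitions cannot drift apart. A userspace sketch of the bit arithmetic, using the compiler builtin as a stand-in:

#include <assert.h>

#define HWEIGHT32(w) __builtin_popcount(w)     /* stand-in for the kernel macro */

int main(void)
{
        assert(HWEIGHT32(0x0) == 0);           /* a zero-valued flag adds no bit */
        assert(HWEIGHT32(0x5 | 0x1) == 2);     /* overlapping flags count once   */
        return 0;
}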

@@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = {
static struct rhashtable gl_hash_table;
void gfs2_glock_free(struct gfs2_glock *gl)
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl)
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
}
void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
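
This is the standard RCU deferred-free idiom: embed an rcu_head in the object and let call_rcu() run the destructor only after a grace period, so lockless readers (here, rhashtable lookups) can never dereference freed memory. A generic sketch of the pattern, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        int data;
        struct rcu_head rcu;    /* storage for the deferred callback */
};

static void obj_dealloc(struct rcu_head *rcu)
{
        /* recover the enclosing object from its rcu_head member */
        struct obj *o = container_of(rcu, struct obj, rcu);

        kfree(o);
}

static void obj_free(struct obj *o)
{
        call_rcu(&o->rcu, obj_dealloc); /* freed only after a grace period */
}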

@@ -367,6 +367,7 @@ struct gfs2_glock {
loff_t end;
} gl_vm;
};
struct rcu_head gl_rcu;
struct rhash_head gl_node;
};

@@ -57,6 +57,7 @@ struct mount {
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;

@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif

@@ -898,6 +898,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
/*
* Clear out all open flags we don't know about so that we don't report
* them in fcntl(F_GETFD) or similar interfaces.
*/
flags &= VALID_OPEN_FLAGS;
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
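
The effect of the new masking is observable from userspace: an undefined bit passed to open(2) is now discarded up front rather than stored and echoed back by fcntl(). A hedged sketch (the bogus bit value is an arbitrary assumption, chosen to miss every defined O_* flag on x86):

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int bogus = 0x40000000;         /* assumed-undefined open flag bit */
        int fd = open("/dev/null", O_RDONLY | bogus);

        if (fd >= 0)
                printf("bogus bit visible via F_GETFL: %s\n",
                       (fcntl(fd, F_GETFL) & bogus) ? "yes" : "no");
        return 0;
}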

@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
static inline struct mount *last_slave(struct mount *p)
{
return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
}
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
}
}
static struct mount *skip_propagation_subtree(struct mount *m,
struct mount *origin)
{
/*
* Advance m such that propagation_next will not return
* the slaves of m.
*/
if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
m = last_slave(m);
return m;
}
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -415,68 +433,107 @@ void propagate_mount_unlock(struct mount *mnt)
}
}
/*
* Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
*/
static void mark_umount_candidates(struct mount *mnt)
static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
BUG_ON(parent == mnt);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
continue;
if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
SET_MNT_MARK(child);
}
}
CLEAR_MNT_MARK(mnt);
mnt->mnt.mnt_flags |= MNT_UMOUNT;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_umounting);
list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
static void __propagate_umount(struct mount *mnt)
static bool __propagate_umount(struct mount *mnt,
struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
bool progress = false;
struct mount *child;
BUG_ON(parent == mnt);
/*
* The state of the parent won't change if this mount is
* already unmounted or marked as without children.
*/
if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
goto out;
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *topper;
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
/*
* umount the child only if the child has no children
* and the child is marked safe to unmount.
*/
if (!child || !IS_MNT_MARKED(child))
/* Verify topper is the only grandchild that has not been
* speculatively unmounted.
*/
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
CLEAR_MNT_MARK(child);
if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
continue;
/* Found a mounted child */
goto children;
}
/* If there is exactly one mount covering all of child
* replace child with that mount.
*/
topper = find_topper(child);
if (topper)
mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
topper);
/* Mark mounts that can be unmounted if not locked */
SET_MNT_MARK(mnt);
progress = true;
if (list_empty(&child->mnt_mounts)) {
list_del_init(&child->mnt_child);
child->mnt.mnt_flags |= MNT_UMOUNT;
list_move_tail(&child->mnt_list, &mnt->mnt_list);
/* If a mount is without children and not locked umount it. */
if (!IS_MNT_LOCKED(mnt)) {
umount_one(mnt, to_umount);
} else {
children:
list_move_tail(&mnt->mnt_umounting, to_restore);
}
out:
return progress;
}
static void umount_list(struct list_head *to_umount,
struct list_head *to_restore)
{
struct mount *mnt, *child, *tmp;
list_for_each_entry(mnt, to_umount, mnt_list) {
list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
/* topper? */
if (child->mnt_mountpoint == mnt->mnt.mnt_root)
list_move_tail(&child->mnt_umounting, to_restore);
else
umount_one(child, to_umount);
}
}
}
static void restore_mounts(struct list_head *to_restore)
{
/* Restore mounts to a clean working state */
while (!list_empty(to_restore)) {
struct mount *mnt, *parent;
struct mountpoint *mp;
mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
CLEAR_MNT_MARK(mnt);
list_del_init(&mnt->mnt_umounting);
/* Should this mount be reparented? */
mp = mnt->mnt_mp;
parent = mnt->mnt_parent;
while (parent->mnt.mnt_flags & MNT_UMOUNT) {
mp = parent->mnt_mp;
parent = parent->mnt_parent;
}
if (parent != mnt->mnt_parent)
mnt_change_mountpoint(parent, mp, mnt);
}
}
static void cleanup_umount_visitations(struct list_head *visited)
{
while (!list_empty(visited)) {
struct mount *mnt =
list_first_entry(visited, struct mount, mnt_umounting);
list_del_init(&mnt->mnt_umounting);
}
}
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.
@@ -487,12 +544,69 @@ static void __propagate_umount(struct mount *mnt)
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
LIST_HEAD(to_restore);
LIST_HEAD(to_umount);
LIST_HEAD(visited);
list_for_each_entry_reverse(mnt, list, mnt_list)
mark_umount_candidates(mnt);
/* Find candidates for unmounting */
list_for_each_entry_reverse(mnt, list, mnt_list) {
struct mount *parent = mnt->mnt_parent;
struct mount *m;
/*
* If this mount has already been visited, it is known that its
* entire peer group and all of their slaves in the propagation
* tree for the mountpoint have already been visited, and there is
* no need to visit them again.
*/
if (!list_empty(&mnt->mnt_umounting))
continue;
list_add_tail(&mnt->mnt_umounting, &visited);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint);
if (!child)
continue;
if (!list_empty(&child->mnt_umounting)) {
/*
* If the child has already been visited, it is
* known that its entire peer group and all of
* their slaves in the propagation tree for the
* mountpoint have already been visited and there
* is no need to visit this subtree again.
*/
m = skip_propagation_subtree(m, parent);
continue;
} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
/*
* We have come across a partially unmounted
* mount in the list that has not been visited yet.
* Remember it has been visited and continue
* about our merry way.
*/
list_add_tail(&child->mnt_umounting, &visited);
continue;
}
/* Check the child and parents while progress is made */
while (__propagate_umount(child,
&to_umount, &to_restore)) {
/* Is the parent a umount candidate? */
child = child->mnt_parent;
if (list_empty(&child->mnt_umounting))
break;
}
}
}
umount_list(&to_umount, &to_restore);
restore_mounts(&to_restore);
cleanup_umount_visitations(&visited);
list_splice_tail(&to_umount, list);
list_for_each_entry(mnt, list, mnt_list)
__propagate_umount(mnt);
return 0;
}
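
Much of the speed-up comes from one bookkeeping trick: list_del_init() leaves a node linked to itself, so list_empty(&mnt->mnt_umounting) doubles as an O(1) "not yet visited" test, and draining the visited list restores every node without a second walk. A generic sketch of the idiom, detached from the mount code:

#include <linux/list.h>

struct node {
        struct list_head link;          /* empty <=> not yet visited */
};

static bool visit_once(struct node *n, struct list_head *visited)
{
        if (!list_empty(&n->link))
                return false;           /* already seen */
        list_add_tail(&n->link, visited);
        return true;
}

static void unvisit_all(struct list_head *visited)
{
        while (!list_empty(visited)) {
                struct node *n = list_first_entry(visited, struct node, link);

                list_del_init(&n->link); /* back to the "not visited" state */
        }
}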

@@ -766,13 +766,9 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
* afterwards in the other cases: we fsstack_copy_inode_size from
* the lower level.
*/
if (current->mm)
down_write(&current->mm->mmap_sem);
if (ia->ia_valid & ATTR_SIZE) {
err = inode_newsize_ok(&tmp, ia->ia_size);
if (err) {
if (current->mm)
up_write(&current->mm->mmap_sem);
goto out;
}
truncate_setsize(inode, ia->ia_size);
@@ -795,8 +791,6 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
err = notify_change2(lower_mnt, lower_dentry, &lower_ia, /* note: lower_ia */
NULL);
mutex_unlock(&d_inode(lower_dentry)->i_mutex);
if (current->mm)
up_write(&current->mm->mmap_sem);
if (err)
goto out;

@@ -364,41 +364,34 @@ out:
return err;
}
/* A feature which supports mount_nodev() with options */
static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
struct file_system_type *fs_type, int flags,
const char *dev_name, void *data,
int (*fill_super)(struct vfsmount *, struct super_block *,
const char *, void *, int))
struct sdcardfs_mount_private {
struct vfsmount *mnt;
const char *dev_name;
void *raw_data;
};
static int __sdcardfs_fill_super(
struct super_block *sb,
void *_priv, int silent)
{
int error;
struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
struct sdcardfs_mount_private *priv = _priv;
if (IS_ERR(s))
return ERR_CAST(s);
s->s_flags = flags;
error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
return ERR_PTR(error);
}
s->s_flags |= MS_ACTIVE;
return dget(s->s_root);
return sdcardfs_read_super(priv->mnt,
sb, priv->dev_name, priv->raw_data, silent);
}
static struct dentry *sdcardfs_mount(struct vfsmount *mnt,
struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
/*
* dev_name is a lower_path_name,
* raw_data is a option string.
*/
return mount_nodev_with_options(mnt, fs_type, flags, dev_name,
raw_data, sdcardfs_read_super);
struct sdcardfs_mount_private priv = {
.mnt = mnt,
.dev_name = dev_name,
.raw_data = raw_data
};
return mount_nodev(fs_type, flags,
&priv, __sdcardfs_fill_super);
}
static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
@@ -423,7 +416,7 @@ void sdcardfs_kill_sb(struct super_block *sb)
list_del(&sbi->list);
mutex_unlock(&sdcardfs_super_list_lock);
}
generic_shutdown_super(sb);
kill_anon_super(sb);
}
static struct file_system_type sdcardfs_fs_type = {

@@ -368,6 +368,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
* @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -396,6 +397,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
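
A hedged sketch of how a driver class might use the new hook (names are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/module.h>

static int example_shutdown(struct device *dev)
{
        /* quiesce the device: stop DMA, mask interrupts, etc. */
        return 0;
}

static struct class example_class = {
        .name     = "example",
        .owner    = THIS_MODULE,
        .shutdown = example_shutdown,
};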

@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
/* list of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)

@@ -105,6 +105,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */

@@ -579,9 +579,9 @@ extern void usb_ep0_reinit(struct usb_device *);
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
#define EndpointRequest \
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
#define EndpointOutRequest \
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
/* GetBusState and SetHubDescriptor are optional, omitted */
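
Worked token values, as a sketch (the ch9.h constants are USB_DIR_IN 0x80, USB_TYPE_STANDARD 0x00, USB_RECIP_INTERFACE 0x01, USB_RECIP_ENDPOINT 0x02; bmRequestType occupies the high byte of the token the HCD switches on):

#include <assert.h>

int main(void)
{
        assert(((0x80 | 0x00 | 0x01) << 8) == 0x8100); /* old: IN, interface */
        assert(((0x80 | 0x00 | 0x02) << 8) == 0x8200); /* new: IN, endpoint  */
        assert(((0x00 | 0x00 | 0x02) << 8) == 0x0200); /* new: OUT, endpoint */
        return 0;
}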

@@ -21,6 +21,7 @@ struct route_info {
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -209,4 +210,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
return daddr;
}
static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
{
return a->dst.dev == b->dst.dev &&
a->rt6i_idev == b->rt6i_idev &&
ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
!lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
}
#endif

@@ -37,9 +37,56 @@ enum {
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
enum {
/**
* enum flat_binder_object_shifts: shift values for flat_binder_object_flags
* @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
*
*/
enum flat_binder_object_shifts {
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
};
/**
* enum flat_binder_object_flags - flags for use in flat_binder_object.flags
*/
enum flat_binder_object_flags {
/**
* @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
*
* These bits can be used to set the minimum scheduler priority
* at which transactions into this node should run. Valid values
* in these bits depend on the scheduler policy encoded in
* @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
*
* For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
* For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
*/
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
/**
* @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
*/
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
/**
* @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
*
* These two bits can be used to set the min scheduling policy at which
* transactions on this node should run. These match the UAPI
* scheduler policy values, eg:
* 00b: SCHED_NORMAL
* 01b: SCHED_FIFO
* 10b: SCHED_RR
* 11b: SCHED_BATCH
*/
FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
/**
* @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
*
* Only when set, calls into this node will inherit a real-time
* scheduling policy from the caller (for synchronous transactions).
*/
FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
};
#ifdef BINDER_IPC_32BIT
@@ -186,6 +233,19 @@ struct binder_version {
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
/*
* Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
* Set ptr to NULL for the first call to get the info for the first node, and
* then repeat the call passing the previously returned value to get the next
* nodes. ptr will be 0 when there are no more nodes.
*/
struct binder_node_debug_info {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
__u32 has_strong_ref;
__u32 has_weak_ref;
};
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -193,6 +253,7 @@ struct binder_version {
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
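
A hedged userspace sketch of the iteration protocol described in the comment above (assumes an already-open binder fd, and assumes the Android UAPI header path used by this tree):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void dump_nodes(int fd)
{
        struct binder_node_debug_info info = { .ptr = 0 }; /* 0: start at first node */

        do {
                if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
                        break;
                if (info.ptr)
                        printf("node %llx strong %u weak %u\n",
                               (unsigned long long)info.ptr,
                               info.has_strong_ref, info.has_weak_ref);
        } while (info.ptr);     /* driver reports 0 when no nodes remain */
}
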
/*
* NOTE: Two special error codes you should check for when calling

@@ -1251,8 +1251,10 @@ retry:
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
if (ret == 1)
if (ret == 1) {
sock = NULL;
goto retry;
}
if (ret) {
sock = NULL;
nc = NULL;

@@ -765,6 +765,11 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose("R%d leaks addr into mem\n", insn->src_reg);
return -EACCES;
}
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
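
A sketch of the program shape the new check rejects for unprivileged loaders, written with the kernel's insn-builder macros from linux/filter.h (illustrative only; R10 is the frame pointer, i.e. a kernel address):

#include <linux/filter.h>

static const struct bpf_insn leak_attempt[] = {
        /* r2 = r10: copy the frame pointer (a kernel address) */
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        /* lock *(u64 *)(r10 - 8) += r2 -> now rejected: "leaks addr into mem" */
        BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
};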

@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
return 0;
}
int core_kernel_text(unsigned long addr)
int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
