This is the 4.4.157 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAluitjwACgkQONu9yGCS
aT7iuA/9FDL/m4yOFPh6lFP6b5JnpDoKniJM3R6eg8am9TYaCe0mwJImEy8yP8sH
BOK/LECOJqV8Waw0ANQieJYZj/GsRXk9TOwUwvOCbhNwfu+e2x4/31dRIpxSQaCs
dYROb4ISGd9wyLMKqgh0zqMxKKfb/Ija4oBjfz7xUJYoHFuc8hlfic6HUr8i/J76
kz5LJ5uPWyrBOKzQT15o0bz05LmnKBX8TyhpzzPBf/+eQ1jzh7uvpawcOz03u8iV
6VpNXCbTTUf863nmOxcEfuClI1GnCHstAHTKaEc6u5MUhkJKKqxWDTsO92qhnUne
FXB7/UeVwsGA69Oy4nInJMGI7hHlJ6LR1CBA9SmfjzUvBY9P6nT2vrU6NYg0n3Bd
tP7S69xXQUdkkvDNjphsOuexuResITJ48obg+Lx2ijCAHNosafKyN1It8t/euOAD
xCeTxfLtXMCO+3z+UvOwFnKwgLImt1Bh8fGynjpk7fvIycrm+FP0iZ+2cw4NUiMU
jKtjvQCWbfK64fZ5eIdxo/rKyX7hK3PRMw6r6rEvaW/z6Cm33Dvy+1Rn3fiXJpIS
oEt7knHsoBraHtrUvbPXMc5S0ZNvoNLD3omWm1Ot+NlP3ogIi/ZFwvwUU537FZmL
2g8V16o0IliBOqNr3vkDyInv/5+LDVI22noc3bjEoi/LsoYe4j4=
=2RHb
-----END PGP SIGNATURE-----

Merge 4.4.157 into android-4.4

Changes in 4.4.157:
	i2c: xiic: Make the start and the byte count write atomic
	i2c: i801: fix DNV's SMBCTRL register offset
	ALSA: hda - Fix cancel_work_sync() stall from jackpoll work
	cfq: Give a chance for arming slice idle timer in case of group_idle
	kthread: Fix use-after-free if kthread fork fails
	kthread: fix boot hang (regression) on MIPS/OpenRISC
	staging: rt5208: Fix a sleep-in-atomic bug in xd_copy_page
	staging/rts5208: Fix read overflow in memcpy
	block,blkcg: use __GFP_NOWARN for best-effort allocations in blkcg
	locking/rwsem-xadd: Fix missed wakeup due to reordering of load
	selinux: use GFP_NOWAIT in the AVC kmem_caches
	locking/osq_lock: Fix osq_lock queue corruption
	ARC: [plat-axs*]: Enable SWAP
	misc: mic: SCIF Fix scif_get_new_port() error handling
	ethtool: Remove trailing semicolon for static inline
	Bluetooth: h5: Fix missing dependency on BT_HCIUART_SERDEV
	gpio: tegra: Move driver registration to subsys_init level
	scsi: target: fix __transport_register_session locking
	md/raid5: fix data corruption of replacements after originals dropped
	misc: ti-st: Fix memory leak in the error path of probe()
	uio: potential double frees if __uio_register_device() fails
	tty: rocket: Fix possible buffer overwrite on register_PCI
	f2fs: do not set free of current section
	perf tools: Allow overriding MAX_NR_CPUS at compile time
	NFSv4.0 fix client reference leak in callback
	macintosh/via-pmu: Add missing mmio accessors
	ath10k: prevent active scans on potential unusable channels
	MIPS: Fix ISA virt/bus conversion for non-zero PHYS_OFFSET
	ata: libahci: Correct setting of DEVSLP register
	scsi: 3ware: fix return 0 on the error path of probe
	ath10k: disable bundle mgmt tx completion event support
	Bluetooth: hidp: Fix handling of strncpy for hid->name information
	x86/mm: Remove in_nmi() warning from vmalloc_fault()
	gpio: ml-ioh: Fix buffer underwrite on probe error path
	net: mvneta: fix mtu change on port without link
	MIPS: Octeon: add missing of_node_put()
	net: dcb: For wild-card lookups, use priority -1, not 0
	Input: atmel_mxt_ts - only use first T9 instance
	partitions/aix: append null character to print data from disk
	partitions/aix: fix usage of uninitialized lv_info and lvname structures
	iommu/ipmmu-vmsa: Fix allocation in atomic context
	mfd: ti_am335x_tscadc: Fix struct clk memory leak
	f2fs: fix to do sanity check with {sit,nat}_ver_bitmap_bytesize
	MIPS: WARN_ON invalid DMA cache maintenance, not BUG_ON
	RDMA/cma: Do not ignore net namespace for unbound cm_id
	xhci: Fix use-after-free in xhci_free_virt_device
	vmw_balloon: include asm/io.h
	netfilter: x_tables: avoid stack-out-of-bounds read in xt_copy_counters_from_user
	drivers: net: cpsw: fix parsing of phy-handle DT property in dual_emac config
	net: ethernet: ti: cpsw: fix mdio device reference leak
	ethernet: ti: davinci_emac: add missing of_node_put after calling of_parse_phandle
	crypto: vmx - Fix sleep-in-atomic bugs
	mtd: ubi: wl: Fix error return code in ubi_wl_init()
	autofs: fix autofs_sbi() does not check super block type
	x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
	mm: get rid of vmacache_flush_all() entirely
	Linux 4.4.157

Change-Id: I30fc9e099e9065aff5e53c648d822c405525bb07
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c139ea660b
70 changed files with 311 additions and 176 deletions

--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 156
+SUBLEVEL = 157
 EXTRAVERSION =
 NAME = Blurry Fish Butt

--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,6 +1,5 @@
 CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set

--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,6 +1,5 @@
 CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set

--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,6 +1,5 @@
 CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set

--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -349,6 +349,7 @@ static int __init octeon_ehci_device_init(void)
 		return 0;
 
 	pd = of_find_device_by_node(ehci_node);
+	of_node_put(ehci_node);
 	if (!pd)
 		return 0;
 
@@ -411,6 +412,7 @@ static int __init octeon_ohci_device_init(void)
 		return 0;
 
 	pd = of_find_device_by_node(ohci_node);
+	of_node_put(ohci_node);
 	if (!pd)
 		return 0;
 

--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
 {
-	return (unsigned long)address - PAGE_OFFSET;
+	return virt_to_phys(address);
 }
 
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
 {
-	return (void *)(address + PAGE_OFFSET);
+	return phys_to_virt(address);
 }
 
 #define isa_page_to_bus page_to_phys

--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -117,7 +117,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 

--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -712,7 +712,8 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
 	/* Catch bad driver code */
-	BUG_ON(size == 0);
+	if (WARN_ON(size == 0))
+		return;
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {
@@ -745,7 +746,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 {
 	/* Catch bad driver code */
-	BUG_ON(size == 0);
+	if (WARN_ON(size == 0))
+		return;
 
 	preempt_disable();
 	if (cpu_has_inclusive_pcaches) {

--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -152,8 +152,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);

--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,8 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+
+	__u8			x86_cache_bits;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
@@ -174,7 +176,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -634,6 +634,46 @@ void x86_spec_ctrl_setup_ap(void)
 
 #undef pr_fmt
 #define pr_fmt(fmt)	"L1TF: " fmt
+
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -641,16 +681,13 @@ static void __init l1tf_select_mitigation(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 #if CONFIG_PGTABLE_LEVELS == 2
 	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
 	return;
 #endif
 
-	/*
-	 * This is extremely unlikely to happen because almost all
-	 * systems have far more MAX_PA/2 than RAM can be fit into
-	 * DIMM slots.
-	 */
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
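
A quick scale check on the new limit (illustrative arithmetic, not part of the diff; PAGE_SHIFT is taken as 12, as on x86):

/*
 * Before: a CPU reporting 36 physical address bits gave
 *   half_pa = (1ULL << (36 - 1 - 12)) << 12 = 1ULL << 35 = 32 GiB.
 * After override_cache_bits() forces x86_cache_bits to 44:
 *   half_pa = (1ULL << (44 - 1 - 12)) << 12 = 1ULL << 43 = 8 TiB.
 * A Nehalem-class machine with 32G of RAM therefore no longer trips the
 * "more than MAX_PA/2 memory" warning, while the PTE-inversion bit still
 * stays inside what the cache actually tags.
 */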

--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -798,6 +798,8 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_phys_bits = 36;
 #endif
 
+	c->x86_cache_bits = c->x86_phys_bits;
+
 	if (c->extended_cpuid_level >= 0x8000000a)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 

--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -273,8 +273,6 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
 		return -1;
 
-	WARN_ON_ONCE(in_nmi());
-
 	/*
 	 * Synchronize this task's top level page-table
 	 * with the 'reference' page table.

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	}
 
 	wb_congested = wb_congested_get_create(&q->backing_dev_info,
-					       blkcg->css.id, GFP_NOWAIT);
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
 		ret = -ENOMEM;
 		goto err_put_css;
@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 
 	/* allocate */
 	if (!new_blkg) {
-		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 		if (unlikely(!new_blkg)) {
 			ret = -ENOMEM;
 			goto err_put_congested;
@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 	}
 
 	spin_lock_init(&blkcg->lock);
-	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1238,7 +1239,7 @@ pd_prealloc:
 		if (blkg->pd[pol->plid])
 			continue;
 
-		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {

--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2905,7 +2905,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * for devices that support queuing, otherwise we still have a problem
 	 * with sync vs async workloads.
 	 */
-	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+	    !cfqd->cfq_group_idle)
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -3810,7 +3811,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}
 
-	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+	cfqq = kmem_cache_alloc_node(cfq_pool,
+				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
 				     cfqd->queue->node);
 	if (!cfqq) {
 		cfqq = &cfqd->oom_cfqq;

--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -177,7 +177,7 @@ int aix_partition(struct parsed_partitions *state)
 	u32 vgda_sector = 0;
 	u32 vgda_len = 0;
 	int numlvs = 0;
-	struct pvd *pvd;
+	struct pvd *pvd = NULL;
 	struct lv_info {
 		unsigned short pps_per_lv;
 		unsigned short pps_found;
@@ -231,10 +231,11 @@ int aix_partition(struct parsed_partitions *state)
 				if (lvip[i].pps_per_lv)
 					foundlvs += 1;
 			}
+			/* pvd loops depend on n[].name and lvip[].pps_per_lv */
+			pvd = alloc_pvd(state, vgda_sector + 17);
 		}
 		put_dev_sector(sect);
 	}
-	pvd = alloc_pvd(state, vgda_sector + 17);
 	if (pvd) {
 		int numpps = be16_to_cpu(pvd->pp_count);
 		int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -281,10 +282,14 @@ int aix_partition(struct parsed_partitions *state)
 				next_lp_ix += 1;
 		}
 		for (i = 0; i < state->limit; i += 1)
-			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+				char tmp[sizeof(n[i].name) + 1]; // null char
+
+				snprintf(tmp, sizeof(tmp), "%s", n[i].name);
 				pr_warn("partition %s (%u pp's found) is "
 					"not contiguous\n",
-					n[i].name, lvip[i].pps_found);
+					tmp, lvip[i].pps_found);
+			}
 		kfree(pvd);
 	}
 	kfree(n);

--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2113,6 +2113,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
 		deto = 20;
 	}
 
+	/* Make dito, mdat, deto bits to 0s */
+	devslp &= ~GENMASK_ULL(24, 2);
 	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
 		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
 		   (deto << PORT_DEVSLP_DETO_OFFSET) |
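
The added mask is the usual read-modify-write guard for a multi-field register; OR-ing can only set bits, so values left over from an earlier configuration would merge with the new ones. A minimal sketch of the pattern (names from the hunk above):

/* buggy: old DITO/MDAT/DETO bits survive the update */
devslp |= dito << PORT_DEVSLP_DITO_OFFSET;
/* fixed: clear the whole field range first, then install clean values */
devslp &= ~GENMASK_ULL(24, 2);
devslp |= dito << PORT_DEVSLP_DITO_OFFSET;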

--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -125,6 +125,7 @@ config BT_HCIUART_LL
 config BT_HCIUART_3WIRE
 	bool "Three-wire UART (H5) protocol support"
 	depends on BT_HCIUART
+	depends on BT_HCIUART_SERDEV
 	help
 	  The HCI Three-wire UART Transport Layer makes it possible to
 	  user the Bluetooth HCI over a serial port interface. The HCI

--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -111,24 +111,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_altivec();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			enable_kernel_altivec();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->enc_key, walk.iv, 1);
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
@@ -152,24 +151,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
 					       nbytes);
 	} else {
-		preempt_disable();
-		pagefault_disable();
-		enable_kernel_altivec();
-		enable_kernel_vsx();
-
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
 		while ((nbytes = walk.nbytes)) {
+			preempt_disable();
+			pagefault_disable();
+			enable_kernel_vsx();
+			enable_kernel_altivec();
 			aes_p8_cbc_encrypt(walk.src.virt.addr,
 					   walk.dst.virt.addr,
 					   nbytes & AES_BLOCK_MASK,
 					   &ctx->dec_key, walk.iv, 0);
+			pagefault_enable();
+			preempt_enable();
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
-
-		pagefault_enable();
-		preempt_enable();
 	}
 
 	return ret;
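
The reshuffle is more than style: the VSX/AltiVec enables require preemption and page faults off, but blkcipher_walk_done() may sleep, so the old code could sleep inside an atomic region. The resulting shape, reduced to a skeleton (a sketch, not the full function):

while ((nbytes = walk.nbytes)) {
	preempt_disable();	/* vector unit state must not be      */
	pagefault_disable();	/* switched out mid-block             */
	enable_kernel_vsx();
	enable_kernel_altivec();
	/* ... aes_p8_cbc_encrypt() on this chunk, no sleeping here ... */
	pagefault_enable();
	preempt_enable();
	/* may sleep safely: we are preemptible again */
	ret = blkcipher_walk_done(desc, &walk, nbytes & (AES_BLOCK_SIZE - 1));
}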

--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -495,9 +495,10 @@ err_irq_alloc_descs:
 
 	chip = chip_save;
 err_gpiochip_add:
+	chip = chip_save;
 	while (--i >= 0) {
-		chip--;
 		gpiochip_remove(&chip->gpio);
+		chip++;
 	}
 	kfree(chip_save);
 

--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -591,4 +591,4 @@ static int __init tegra_gpio_init(void)
 {
 	return platform_driver_register(&tegra_gpio_driver);
 }
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);

--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -128,6 +128,7 @@
 
 #define SBREG_BAR		0x10
 #define SBREG_SMBCTRL		0xc6000c
+#define SBREG_SMBCTRL_DNV	0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		0x08
@@ -1251,7 +1252,11 @@ static void i801_add_tco(struct i801_priv *priv)
 	spin_unlock(&p2sb_spinlock);
 
 	res = &tco_res[ICH_RES_MEM_OFF];
-	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+	else
+		res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
 	res->end = res->start + 3;
 	res->flags = IORESOURCE_MEM;
 

--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -533,6 +533,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
 	u8 rx_watermark;
 	struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+	unsigned long flags;
 
 	/* Clear and enable Rx full interrupt. */
 	xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -548,6 +549,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 		rx_watermark = IIC_RX_FIFO_DEPTH;
 	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+	local_irq_save(flags);
 	if (!(msg->flags & I2C_M_NOSTART))
 		/* write the address */
 		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -558,6 +560,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
 	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
 		msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+	local_irq_restore(flags);
+
 	if (i2c->nmsgs == 1)
 		/* very last, enable bus not busy as well */
 		xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1280,9 +1280,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
 		       (addr->src_addr.ss_family == AF_IB ||
			cma_protocol_roce_dev_port(id->device, port_num));
 
-	return !addr->dev_addr.bound_dev_if ||
-	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
-		addr->dev_addr.bound_dev_if == net_dev->ifindex);
+	/*
+	 * Net namespaces must match, and if the listner is listening
+	 * on a specific netdevice than netdevice must match as well.
+	 */
+	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+	    (!!addr->dev_addr.bound_dev_if ==
+	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+		return true;
+	else
+		return false;
 }
 
 static struct rdma_id_private *cma_find_listener(
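
The new condition is dense; read as a truth table (a reviewer's paraphrase of the hunk above, not authoritative):

/* match = netns_eq && (!!bound_dev_if == (bound_dev_if == ifindex))
 *
 * bound_dev_if == 0 (listener not bound to a device):
 *   !!0 == 0, and (0 == ifindex) is false for any real netdev
 *   (ifindex >= 1), so the device clause holds and only the
 *   net-namespace check decides -- unbound ids no longer match
 *   requests from foreign namespaces, which is the bug being fixed.
 * bound_dev_if != 0 (bound listener):
 *   !!x == 1, so the request must arrive on exactly that ifindex.
 */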

--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1593,10 +1593,11 @@ static int mxt_get_object_table(struct mxt_data *data)
 			break;
 		case MXT_TOUCH_MULTI_T9:
 			data->multitouch = MXT_TOUCH_MULTI_T9;
+			/* Only handle messages from first T9 instance */
 			data->T9_reportid_min = min_id;
-			data->T9_reportid_max = max_id;
-			data->num_touchids = object->num_report_ids
-						* mxt_obj_instances(object);
+			data->T9_reportid_max = min_id +
+						object->num_report_ids - 1;
+			data->num_touchids = object->num_report_ids;
 			break;
 		case MXT_SPT_MESSAGECOUNT_T44:
 			data->T44_address = object->start_address;

--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -44,7 +44,7 @@ struct ipmmu_vmsa_domain {
 	struct io_pgtable_ops *iop;
 
 	unsigned int context_id;
-	spinlock_t lock;			/* Protects mappings */
+	struct mutex mutex;			/* Protects mappings */
 };
 
 struct ipmmu_vmsa_archdata {
@@ -464,7 +464,7 @@ static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
 	if (!domain)
 		return NULL;
 
-	spin_lock_init(&domain->lock);
+	mutex_init(&domain->mutex);
 
 	return &domain->io_domain;
 }
@@ -488,7 +488,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
 	struct ipmmu_vmsa_device *mmu = archdata->mmu;
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
-	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
 
@@ -497,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&domain->lock, flags);
+	mutex_lock(&domain->mutex);
 
 	if (!domain->mmu) {
 		/* The domain hasn't been used yet, initialize it. */
@@ -513,7 +512,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		ret = -EINVAL;
 	}
 
-	spin_unlock_irqrestore(&domain->lock, flags);
+	mutex_unlock(&domain->mutex);
 
 	if (ret < 0)
 		return ret;

--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -530,8 +530,9 @@ init_pmu(void)
 	int timeout;
 	struct adb_request req;
 
-	out_8(&via[B], via[B] | TREQ);			/* negate TREQ */
-	out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK);	/* TACK in, TREQ out */
+	/* Negate TREQ. Set TACK to input and TREQ to output. */
+	out_8(&via[B], in_8(&via[B]) | TREQ);
+	out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
 
 	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 	timeout =  100000;
@@ -1453,8 +1454,8 @@ pmu_sr_intr(void)
 	struct adb_request *req;
 	int bite = 0;
 
-	if (via[B] & TREQ) {
-		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+	if (in_8(&via[B]) & TREQ) {
+		printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
 		out_8(&via[IFR], SR_INT);
 		return NULL;
 	}

--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4190,6 +4190,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			s->failed++;
 			if (rdev && !test_bit(Faulty, &rdev->flags))
 				do_recovery = 1;
+			else if (!rdev) {
+				rdev = rcu_dereference(
+				    conf->disks[i].replacement);
+				if (rdev && !test_bit(Faulty, &rdev->flags))
+					do_recovery = 1;
+			}
 		}
 	}
 	if (test_bit(STRIPE_SYNCING, &sh->state)) {

--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -224,14 +224,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
 	 * The TSC_ADC_SS controller design assumes the OCP clock is
 	 * at least 6x faster than the ADC clock.
 	 */
-	clk = clk_get(&pdev->dev, "adc_tsc_fck");
+	clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "failed to get TSC fck\n");
 		err = PTR_ERR(clk);
 		goto err_disable_clk;
 	}
 	clock_rate = clk_get_rate(clk);
-	clk_put(clk);
 	tscadc->clk_div = clock_rate / ADC_CLK;
 
 	/* TSCADC_CLKDIV needs to be configured to the value minus 1 */

--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
 			goto scif_bind_exit;
 		}
 	} else {
-		pn = scif_get_new_port();
-		if (!pn) {
-			ret = -ENOSPC;
+		ret = scif_get_new_port();
+		if (ret < 0)
 			goto scif_bind_exit;
-		}
+		pn = ret;
 	}
 
 	ep->state = SCIFEP_BOUND;
@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
 		err = -EISCONN;
 		break;
 	case SCIFEP_UNBOUND:
-		ep->port.port = scif_get_new_port();
-		if (!ep->port.port) {
-			err = -ENOSPC;
-		} else {
-			ep->port.node = scif_info.nodeid;
-			ep->conn_async_state = ASYNC_CONN_IDLE;
-		}
+		err = scif_get_new_port();
+		if (err < 0)
+			break;
+		ep->port.port = err;
+		ep->port.node = scif_info.nodeid;
+		ep->conn_async_state = ASYNC_CONN_IDLE;
 		/* Fall through */
 	case SCIFEP_BOUND:
 		/*
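
Both call sites now follow the kernel's usual "negative errno or valid value" convention for scif_get_new_port(), instead of treating 0 as the only failure indicator. The idiom in isolation (a sketch of the pattern, not the driver code verbatim):

int ret = scif_get_new_port();	/* port number (> 0) or -errno */
if (ret < 0)
	return ret;	/* propagate the real error, e.g. -ENOSPC */
pn = ret;		/* success: ret is the allocated port */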

--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -757,14 +757,14 @@ static int kim_probe(struct platform_device *pdev)
 	err = gpio_request(kim_gdata->nshutdown, "kim");
 	if (unlikely(err)) {
 		pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 
 	/* Configure nShutdown GPIO as output=0 */
 	err = gpio_direction_output(kim_gdata->nshutdown, 0);
 	if (unlikely(err)) {
 		pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
-		return err;
+		goto err_sysfs_group;
 	}
 	/* get reference of pdev for request_firmware
 	 */

--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,6 +45,7 @@
 #include <linux/seq_file.h>
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
+#include <linux/io.h>
 #include <asm/hypervisor.h>
 
 MODULE_AUTHOR("VMware, Inc.");

--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1597,8 +1597,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 			cond_resched();
 
 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-			if (!e)
+			if (!e) {
+				err = -ENOMEM;
 				goto out_free;
+			}
 
 			e->pnum = aeb->pnum;
 			e->ec = aeb->ec;
@@ -1617,8 +1619,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
 			cond_resched();
 
 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
-			if (!e)
+			if (!e) {
+				err = -ENOMEM;
 				goto out_free;
+			}
 
 			e->pnum = aeb->pnum;
 			e->ec = aeb->ec;

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2569,7 +2569,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 	}
 
 	mvneta_start_dev(pp);
-	mvneta_port_up(pp);
 
 	netdev_update_features(dev);
 

--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -371,7 +371,6 @@ struct cpsw_priv {
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
-	struct device_node		*phy_node;
 	struct napi_struct		napi_rx;
 	struct napi_struct		napi_tx;
 	struct device			*dev;
@@ -1165,8 +1164,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
 			   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-	if (priv->phy_node)
-		slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+	if (slave->data->phy_node)
+		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 				 &cpsw_adjust_link, 0, slave->data->phy_if);
 	else
 		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
@@ -1957,12 +1956,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
 	slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
 			 struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct device_node *slave_node;
-	struct cpsw_platform_data *data = &priv->data;
 	int i = 0, ret;
 	u32 prop;
 
@@ -2050,7 +2048,8 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
 		if (strcmp(slave_node->name, "slave"))
 			continue;
 
-		priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+		slave_data->phy_node = of_parse_phandle(slave_node,
+							"phy-handle", 0);
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if (of_phy_is_fixed_link(slave_node)) {
 			struct device_node *phy_node;
@@ -2087,6 +2086,7 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
 		}
 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 			 PHY_ID_FMT, mdio->name, phyid);
+		put_device(&mdio->dev);
 	} else {
 		dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
 		goto no_phy_slave;
@@ -2291,7 +2291,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	if (cpsw_probe_dt(priv, pdev)) {
+	if (cpsw_probe_dt(&priv->data, pdev)) {
 		dev_err(&pdev->dev, "cpsw: platform data missing\n");
 		ret = -ENODEV;
 		goto clean_runtime_disable_ret;

--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
 #include <linux/phy.h>
 
 struct cpsw_slave_data {
+	struct device_node *phy_node;
 	char		phy_id[MII_BUS_ID_SIZE];
 	int		phy_if;
 	u8		mac_addr[ETH_ALEN];

--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2108,6 +2108,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
 	cpdma_ctlr_destroy(priv->dma);
 
 	unregister_netdev(ndev);
+	of_node_put(priv->phy_node);
 	free_netdev(ndev);
 
 	return 0;

--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2901,6 +2901,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
 			ch->passive = passive;
 
+			/* the firmware is ignoring the "radar" flag of the
+			 * channel and is scanning actively using Probe Requests
+			 * on "Radar detection"/DFS channels which are not
+			 * marked as "available"
+			 */
+			ch->passive |= ch->chan_radar;
+
 			ch->freq = channel->center_freq;
 			ch->band_center_freq1 = channel->center_freq;
 			ch->min_power = 0;

--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1424,6 +1424,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
 	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+	cfg->wmi_send_separate = __cpu_to_le32(0);
+	cfg->num_ocb_vdevs = __cpu_to_le32(0);
+	cfg->num_ocb_channels = __cpu_to_le32(0);
+	cfg->num_ocb_schedules = __cpu_to_le32(0);
+	cfg->host_capab = __cpu_to_le32(0);
 
 	ath10k_wmi_put_host_mem_chunks(ar, chunks);
 

--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1209,6 +1209,11 @@ struct wmi_tlv_resource_config {
 	__le32 keep_alive_pattern_size;
 	__le32 max_tdls_concurrent_sleep_sta;
 	__le32 max_tdls_concurrent_buffer_sta;
+	__le32 wmi_send_separate;
+	__le32 num_ocb_vdevs;
+	__le32 num_ocb_channels;
+	__le32 num_ocb_schedules;
+	__le32 host_capab;
 } __packed;
 
 struct wmi_tlv_init_cmd {

--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2045,6 +2045,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 
 	if (twa_initialize_device_extension(tw_dev)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+		retval = -ENOMEM;
 		goto out_free_device_extension;
 	}
 
@@ -2067,6 +2068,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	tw_dev->base_addr = ioremap(mem_addr, mem_len);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+		retval = -ENOMEM;
 		goto out_release_mem_region;
 	}
 
@@ -2074,8 +2076,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	TW_DISABLE_INTERRUPTS(tw_dev);
 
 	/* Initialize the card */
-	if (twa_reset_sequence(tw_dev, 0))
+	if (twa_reset_sequence(tw_dev, 0)) {
+		retval = -ENOMEM;
 		goto out_iounmap;
+	}
 
 	/* Set host specific parameters */
 	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||

--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,6 +1600,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 
 	if (twl_initialize_device_extension(tw_dev)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+		retval = -ENOMEM;
 		goto out_free_device_extension;
 	}
 
@@ -1614,6 +1615,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
 	if (!tw_dev->base_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+		retval = -ENOMEM;
 		goto out_release_mem_region;
 	}
 
@@ -1623,6 +1625,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	/* Initialize the card */
 	if (twl_reset_sequence(tw_dev, 0)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+		retval = -ENOMEM;
 		goto out_iounmap;
 	}
 
|
@ -2278,6 +2278,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
|
|||
|
||||
if (tw_initialize_device_extension(tw_dev)) {
|
||||
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
|
||||
retval = -ENOMEM;
|
||||
goto out_free_device_extension;
|
||||
}
|
||||
|
||||
|
@ -2292,6 +2293,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
|
|||
tw_dev->base_addr = pci_resource_start(pdev, 0);
|
||||
if (!tw_dev->base_addr) {
|
||||
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
|
||||
retval = -ENOMEM;
|
||||
goto out_release_mem_region;
|
||||
}
|
||||
|
||||
|
|
|

--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -536,7 +536,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (sendbytes > 8) {
 		memcpy(buf, inquiry_buf, 8);
-		memcpy(buf + 8, inquiry_string, sendbytes - 8);
+		strncpy(buf + 8, inquiry_string, sendbytes - 8);
 		if (pro_formatter_flag) {
 			/* Additional Length */
 			buf[4] = 0x33;

--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -1252,7 +1252,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 			reg = 0;
 			rtsx_read_register(chip, XD_CTL, &reg);
 			if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
-				wait_timeout(100);
+				mdelay(100);
 
 				if (detect_card_cd(chip,
 					XD_CARD) != STATUS_SUCCESS) {

--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -306,6 +306,7 @@ void __transport_register_session(
 {
 	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
 	unsigned char buf[PR_REG_ISID_LEN];
+	unsigned long flags;
 
 	se_sess->se_tpg = se_tpg;
 	se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -342,7 +343,7 @@ void __transport_register_session(
 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
 		}
 
-		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 		/*
 		 * The se_nacl->nacl_sess pointer will be set to the
 		 * last active I_T Nexus for each struct se_node_acl.
@@ -351,7 +352,7 @@ void __transport_register_session(
 
 		list_add_tail(&se_sess->sess_acl_list,
 			      &se_nacl->acl_sess_list);
-		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 	}
 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 

--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1915,7 +1915,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
 	ByteIO_t UPCIRingInd = 0;
 
 	if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
-	    pci_enable_device(dev))
+	    pci_enable_device(dev) || i >= NUM_BOARDS)
 		return 0;
 
 	rcktpt_io_addr[i] = pci_resource_start(dev, 0);
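
The extra bound matters because the caller-supplied index selects into fixed-size per-board tables (a reviewer's sketch of the failure mode):

/* rcktpt_io_addr[] and the other per-board tables are sized
 * NUM_BOARDS. Probing one more matching PCI device than that used to
 * execute rcktpt_io_addr[NUM_BOARDS] = ..., writing one slot past the
 * end of the array; the added i >= NUM_BOARDS term turns that probe
 * into a clean "return 0" instead.
 */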

--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -833,8 +833,6 @@ int __uio_register_device(struct module *owner,
 	if (ret)
 		goto err_uio_dev_add_attributes;
 
-	info->uio_dev = idev;
-
 	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
 		/*
 		 * Note that we deliberately don't use devm_request_irq
@@ -850,6 +848,7 @@ int __uio_register_device(struct module *owner,
 			goto err_request_irq;
 	}
 
+	info->uio_dev = idev;
 	return 0;
 
 err_request_irq:

--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3675,6 +3675,9 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
+
+	virt_dev->udev = NULL;
+
 	/* Don't disable the slot if the host controller is dead. */
 	state = readl(&xhci->op_regs->status);
 	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||

--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/magic.h>
 
 /* This is the range of ioctl() numbers we claim as ours */
 #define AUTOFS_IOC_FIRST     AUTOFS_IOC_READY
@@ -135,7 +136,8 @@ struct autofs_sb_info {
 
 static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
 {
-	return (struct autofs_sb_info *)(sb->s_fs_info);
+	return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+		NULL : (struct autofs_sb_info *)(sb->s_fs_info);
 }
 
 static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)

--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -18,7 +18,6 @@
 #include <linux/pagemap.h>
 #include <linux/parser.h>
 #include <linux/bitops.h>
-#include <linux/magic.h>
 #include "autofs_i.h"
 #include <linux/module.h>
 

--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -448,6 +448,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
 	if (test_and_clear_bit(segno, free_i->free_segmap)) {
 		free_i->free_segments++;
 
+		if (IS_CURSEC(sbi, secno))
+			goto skip_free;
 		next = find_next_bit(free_i->free_segmap,
 				start_segno + sbi->segs_per_sec, start_segno);
 		if (next >= start_segno + sbi->segs_per_sec) {
@@ -455,6 +457,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
 			free_i->free_sections++;
 		}
 	}
+skip_free:
 	spin_unlock(&free_i->segmap_lock);
 }
 

--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2286,12 +2286,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned int ovp_segments, reserved_segments;
 	unsigned int main_segs, blocks_per_seg;
+	unsigned int sit_segs, nat_segs;
+	unsigned int sit_bitmap_size, nat_bitmap_size;
+	unsigned int log_blocks_per_seg;
 	int i;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
-	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
-	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+	fsmeta += sit_segs;
+	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+	fsmeta += nat_segs;
 	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
 	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
 
@@ -2322,6 +2327,18 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 		return 1;
 	}
 
+	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+
+	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong bitmap size: sit: %u, nat:%u",
+			sit_bitmap_size, nat_bitmap_size);
+		return 1;
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
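
The expected bitmap sizes follow directly from the on-disk geometry, so corrupt checkpoint fields can be rejected up front. A worked example with invented numbers:

/* sit_segs = 6, log_blocks_per_seg = 9 (512 blocks per segment):
 *   ((6 / 2) << 9) / 8 = (3 * 512) / 8 = 192 bytes,
 * i.e. one bit per block over half the SIT segments (the halving
 * matches the two checkpoint versions implied by the _ver_ naming).
 * A checkpoint advertising any other sit_ver_bitmap_bytesize is now
 * refused before it can steer later bitmap accesses out of bounds.
 */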

--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -911,16 +911,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
 	if (hdr_arg.minorversion == 0) {
 		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
-		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+			if (cps.clp)
+				nfs_put_client(cps.clp);
 			goto out_invalidcred;
+		}
 	}
 
 	cps.minorversion = hdr_arg.minorversion;
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
-	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+		if (cps.clp)
+			nfs_put_client(cps.clp);
 		return rpc_system_err;
+	}
 
 	while (status == 0 && nops != hdr_arg.nops) {
 		status = process_op(nops, rqstp, &xdr_in,
 				    argp, &xdr_out, resp, &cps);

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -400,7 +400,7 @@ struct kioctx_table;
 struct mm_struct {
 	struct vm_area_struct *mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
-	u32 vmacache_seqnum;                   /* per-thread vmacache */
+	u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1621,7 +1621,7 @@ struct task_struct {
 
 	struct mm_struct *mm, *active_mm;
 	/* per-thread vma caching */
-	u32 vmacache_seqnum;
+	u64 vmacache_seqnum;
 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat rss_stat;

--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -88,7 +88,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
 		VMACACHE_FIND_CALLS,
 		VMACACHE_FIND_HITS,
-		VMACACHE_FULL_FLUSHES,
 #endif
 		NR_VM_EVENT_ITEMS
 };

--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
 	memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
 						    unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
 	mm->vmacache_seqnum++;
-
-	/* deal with overflows */
-	if (unlikely(mm->vmacache_seqnum == 0))
-		vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
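
Widening the sequence number (see the mm_types.h and sched.h hunks above) is what lets the overflow handler go away; back-of-the-envelope:

/* A u32 seqnum wraps after 2^32 invalidations, which long-lived busy
 * processes can reach, and a wrap that lands on a thread's stale
 * seqnum turns stale vmacache slots into false hits (use-after-free
 * of vma pointers). A u64, even at an aggressive 10^9 invalidations
 * per second, needs 2^64 / 10^9 s, roughly 584 years, to wrap, so the
 * flush-all fallback (removed from mm/vmacache.c later in this diff)
 * is no longer needed.
 */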

--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -819,13 +819,13 @@ struct ethtool_rx_flow_spec {
 static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
 {
 	return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
 
 static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
 {
 	return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
 				ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
 
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1366,6 +1366,18 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	cpufreq_task_times_init(p);
 
+	/*
+	 * This _must_ happen before we call free_task(), i.e. before we jump
+	 * to any of the bad_fork_* labels. This is to avoid freeing
+	 * p->set_child_tid which is (ab)used as a kthread's data pointer for
+	 * kernel threads (PF_KTHREAD).
+	 */
+	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+	/*
+	 * Clear TID on mm_release()?
+	 */
+	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
 	ftrace_graph_init_task(p);
 
 	rt_mutex_init_task(p);
@@ -1527,11 +1539,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
-	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
-	/*
-	 * Clear TID on mm_release()?
-	 */
-	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
 	p->plug = NULL;
 #endif

--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -104,6 +104,19 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 	prev = decode_cpu(old);
 	node->prev = prev;
+
+	/*
+	 * osq_lock()			unqueue
+	 *
+	 * node->prev = prev		osq_wait_next()
+	 * WMB				MB
+	 * prev->next = node		next->prev = prev // unqueue-C
+	 *
+	 * Here 'node->prev' and 'next->prev' are the same variable and we need
+	 * to ensure these stores happen in-order to avoid corrupting the list.
+	 */
+	smp_wmb();
+
 	WRITE_ONCE(prev->next, node);
 
 	/*

--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -510,6 +510,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
+	/*
+	 * __rwsem_down_write_failed_common(sem)
+	 *   rwsem_optimistic_spin(sem)
+	 *     osq_unlock(sem->osq)
+	 *   ...
+	 *   atomic_long_add_return(&sem->count)
+	 *
+	 *      - VS -
+	 *
+	 *              __up_write()
+	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
+	 *                  rwsem_wake(sem)
+	 *                    osq_is_locked(&sem->osq)
+	 *
+	 * And __up_write() must observe !osq_is_locked() when it observes the
+	 * atomic_long_add_return() in order to not miss a wakeup.
+	 *
+	 * This boils down to:
+	 *
+	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
+	 *         MB                         RMB
+	 * [RmW]   Y += 1               [L]   r1 = X
+	 *
+	 * exists (r0=1 /\ r1=0)
+	 */
+	smp_rmb();
+
 	/*
 	 * If a spinner is present, it is not necessary to do the wakeup.
 	 * Try to do wakeup only if the trylock succeeds to minimize

--- a/mm/debug.c
+++ b/mm/debug.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
 		"get_unmapped_area %p\n"
 #endif
@@ -198,7 +198,7 @@ void dump_mm(const struct mm_struct *mm)
 #endif
 		"%s",	/* This is here to hold the comma */
 
-		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
 		mm->get_unmapped_area,
 #endif

--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -5,44 +5,6 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-	struct task_struct *g, *p;
-
-	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-	/*
-	 * Single threaded tasks need not iterate the entire
-	 * list of process. We can avoid the flushing as well
-	 * since the mm's seqnum was increased and don't have
-	 * to worry about other threads' seqnum. Current's
-	 * flush will occur upon the next lookup.
-	 */
-	if (atomic_read(&mm->mm_users) == 1)
-		return;
-
-	rcu_read_lock();
-	for_each_process_thread(g, p) {
-		/*
-		 * Only flush the vmacache pointers as the
-		 * mm seqnum is already set and curr's will
-		 * be set upon invalidation when the next
-		 * lookup is done.
-		 */
-		if (mm == p->mm)
-			vmacache_flush(p);
-	}
-	rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma(). The vmacache is task-local and this

--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -774,7 +774,7 @@ static int hidp_setup_hid(struct hidp_session *session,
 	hid->version = req->version;
 	hid->country = req->country;
 
-	strncpy(hid->name, req->name, sizeof(req->name) - 1);
+	strncpy(hid->name, req->name, sizeof(hid->name));
 
 	snprintf(hid->phys, sizeof(hid->phys), "%pMR",
 		 &l2cap_pi(session->ctrl_sock->sk)->chan->src);

--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1763,7 +1763,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
 		if (itr->app.selector == app->selector &&
 		    itr->app.protocol == app->protocol &&
 		    itr->ifindex == ifindex &&
-		    (!prio || itr->app.priority == prio))
+		    ((prio == -1) || itr->app.priority == prio))
 			return itr;
 	}
 
@@ -1798,7 +1798,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
 	u8 prio = 0;
 
 	spin_lock_bh(&dcb_lock);
-	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+	itr = dcb_app_lookup(app, dev->ifindex, -1);
+	if (itr)
 		prio = itr->app.priority;
 	spin_unlock_bh(&dcb_lock);
 
@@ -1826,7 +1827,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 
 	spin_lock_bh(&dcb_lock);
 	/* Search for existing match and replace */
-	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+	itr = dcb_app_lookup(new, dev->ifindex, -1);
+	if (itr) {
 		if (new->priority)
 			itr->app.priority = new->priority;
 		else {
@@ -1859,7 +1861,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
 	u8 prio = 0;
 
 	spin_lock_bh(&dcb_lock);
-	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+	itr = dcb_app_lookup(app, dev->ifindex, -1);
+	if (itr)
 		prio |= 1 << itr->app.priority;
 	spin_unlock_bh(&dcb_lock);
 
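
The old wildcard encoding collided with a legitimate value; a short illustration (field values invented):

struct dcb_app app = { .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		       .protocol = 0x8906, .priority = 0 };
/* Before: dcb_app_lookup(&app, ifindex, 0) treated priority 0 as
 * "match any", so an entry explicitly pinned to priority 0 was
 * indistinguishable from a wildcard query. With -1 as the only
 * wildcard, 0 is an ordinary, matchable priority again.
 */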

--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -876,7 +876,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
 		info->num_counters = compat_tmp.num_counters;
 		user += sizeof(compat_tmp);
 	} else
@@ -889,9 +889,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
 		if (copy_from_user(info, user, sizeof(*info)) != 0)
 			return ERR_PTR(-EFAULT);
 
-		info->name[sizeof(info->name) - 1] = '\0';
 		user += sizeof(*info);
 	}
+	info->name[sizeof(info->name) - 1] = '\0';
 
 	size = sizeof(struct xt_counters);
 	size *= info->num_counters;
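
Why memcpy plus explicit termination instead of strlcpy (a reviewer's gloss on the two hunks above):

/* strlcpy(dst, src, n) first runs strlen(src); if the name copied in
 * from userspace is not NUL-terminated, that walks past the end of
 * compat_tmp.name on the stack -- the reported out-of-bounds read.
 * Copying exactly sizeof(info->name) - 1 bytes and then storing the
 * terminator (now done once, for both branches) touches only
 * in-bounds memory regardless of what userspace sent:
 */
memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->name[sizeof(info->name) - 1] = '\0';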

--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,27 +348,26 @@ static struct avc_xperms_decision_node
 	struct avc_xperms_decision_node *xpd_node;
 	struct extended_perms_decision *xpd;
 
-	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
-				GFP_ATOMIC | __GFP_NOMEMALLOC);
+	xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
 	if (!xpd_node)
 		return NULL;
 
 	xpd = &xpd_node->xpd;
 	if (which & XPERMS_ALLOWED) {
 		xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->allowed)
 			goto error;
 	}
 	if (which & XPERMS_AUDITALLOW) {
 		xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->auditallow)
 			goto error;
 	}
 	if (which & XPERMS_DONTAUDIT) {
 		xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
-						GFP_ATOMIC | __GFP_NOMEMALLOC);
+						GFP_NOWAIT);
 		if (!xpd->dontaudit)
 			goto error;
 	}
@@ -396,8 +395,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
 {
 	struct avc_xperms_node *xp_node;
 
-	xp_node = kmem_cache_zalloc(avc_xperms_cachep,
-			GFP_ATOMIC|__GFP_NOMEMALLOC);
+	xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
 	if (!xp_node)
 		return xp_node;
 	INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -550,7 +548,7 @@ static struct avc_node *avc_alloc_node(void)
 {
 	struct avc_node *node;
 
-	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
+	node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
 	if (!node)
 		goto out;
 

--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4019,7 +4019,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
 
 	list_for_each_codec(codec, bus) {
 		/* FIXME: maybe a better way needed for forced reset */
-		cancel_delayed_work_sync(&codec->jackpoll_work);
+		if (current_work() != &codec->jackpoll_work.work)
+			cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
 		if (hda_codec_is_power_on(codec)) {
 			hda_call_codec_suspend(codec);

--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -29,7 +29,9 @@ static inline unsigned long long rdclock(void)
 	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
 }
 
+#ifndef MAX_NR_CPUS
 #define MAX_NR_CPUS			1024
+#endif
 
 extern const char *input_name;
 extern bool perf_host, perf_guest;