Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4

Alex Shi 2016-10-05 14:53:08 +02:00
commit be2a76aecf
84 changed files with 625 additions and 512 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 22
SUBLEVEL = 23
EXTRAVERSION =
NAME = Blurry Fish Butt
@ -128,6 +128,10 @@ _all:
# Cancel implicit rules on top Makefile
$(CURDIR)/Makefile Makefile: ;
ifneq ($(words $(subst :, ,$(CURDIR))), 1)
$(error main directory cannot contain spaces nor colons)
endif
ifneq ($(KBUILD_OUTPUT),)
# Invoke a second make in the output directory, passing relevant variables
# check that the output directory actually exists
@ -495,6 +499,12 @@ ifeq ($(KBUILD_EXTMOD),)
endif
endif
endif
# install and modules_install also need to be processed one by one
ifneq ($(filter install,$(MAKECMDGOALS)),)
ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
mixed-targets := 1
endif
endif
ifeq ($(mixed-targets),1)
# ===========================================================================
@ -606,11 +616,16 @@ ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += -Os
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
KBUILD_CFLAGS += -O2
else
KBUILD_CFLAGS += -O2
endif
endif
# Tell gcc to never replace conditional load with a non-conditional one
@ -1260,7 +1275,7 @@ help:
@echo ' firmware_install- Install all firmware to INSTALL_FW_PATH'
@echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
@echo ' dir/ - Build all files in dir and below'
@echo ' dir/file.[oisS] - Build specified target only'
@echo ' dir/file.[ois] - Build specified target only'
@echo ' dir/file.lst - Build specified mixed source/assembly target only'
@echo ' (requires a recent binutils and recent build (System.map))'
@echo ' dir/file.ko - Build module including final link'
@ -1500,11 +1515,11 @@ image_name:
# Clear a bunch of variables before executing the submake
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
# Single targets
# ---------------------------------------------------------------------------

View file

@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (nbytes) {
if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];
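The fix above replaces the stale "if (nbytes)" test with "if (walk.nbytes % AES_BLOCK_SIZE)": after the full-block loop, nbytes can still be non-zero for a block-aligned walk, so only the remainder identifies a genuine partial final block. A minimal sketch of the tail path, reusing the hunk's walk/blocks names and assuming a hypothetical one-block primitive aes_encrypt_block():

unsigned int tail = walk.nbytes % AES_BLOCK_SIZE;

if (tail) {	/* a partial final block really remains */
	u8 ks[AES_BLOCK_SIZE];
	u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
	u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
	unsigned int i;

	aes_encrypt_block(ks, walk.iv);	/* keystream = E_k(counter), assumed helper */
	for (i = 0; i < tail; i++)
		tdst[i] = tsrc[i] ^ ks[i];	/* XOR only the tail bytes */
}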

View file

@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
};
static struct smc91x_platdata smc91x_platdata = {
.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_USE_DMA | SMC91X_NOWAIT,
};
static struct platform_device smc91x_device = {

View file

@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
};
static struct smc91x_platdata xcep_smc91x_info = {
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_NOWAIT | SMC91X_USE_DMA,
};
static struct platform_device smc91x_device = {

View file

@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = {
};
static struct smc91x_platdata smc91x_platdata = {
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_NOWAIT,
};
static struct platform_device realview_eth_device = {

View file

@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
};
static struct smc91x_platdata smc91x_platdata = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
};
static struct platform_device smc91x_device = {

View file

@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (nbytes) {
if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];

View file

@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};

View file

@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};

View file

@ -113,42 +113,6 @@ config SPINLOCK_TEST
help
Add several files to the debugfs to test spinlock speed.
if CPU_MIPSR6
choice
prompt "Compact branch policy"
default MIPS_COMPACT_BRANCHES_OPTIMAL
config MIPS_COMPACT_BRANCHES_NEVER
bool "Never (force delay slot branches)"
help
Pass the -mcompact-branches=never flag to the compiler in order to
force it to always emit branches with delay slots, and make no use
of the compact branch instructions introduced by MIPSr6. This is
useful if you suspect there may be an issue with compact branches in
either the compiler or the CPU.
config MIPS_COMPACT_BRANCHES_OPTIMAL
bool "Optimal (use where beneficial)"
help
Pass the -mcompact-branches=optimal flag to the compiler in order for
it to make use of compact branch instructions where it deems them
beneficial, and use branches with delay slots elsewhere. This is the
default compiler behaviour, and should be used unless you have a
reason to choose otherwise.
config MIPS_COMPACT_BRANCHES_ALWAYS
bool "Always (force compact branches)"
help
Pass the -mcompact-branches=always flag to the compiler in order to
force it to always emit compact branches, making no use of branch
instructions with delay slots. This can result in more compact code
which may be beneficial in some scenarios.
endchoice
endif # CPU_MIPSR6
config SCACHE_DEBUGFS
bool "L2 cache debugfs entries"
depends on DEBUG_FS

View file

@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(
cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
endif
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
#
# Firmware support
#

View file

@ -135,6 +135,7 @@
ldc1 $f28, THREAD_FPR28(\thread)
ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
.macro fpu_restore_16odd thread

View file

@ -11,11 +11,13 @@
#define CP0_EBASE $15, 1
.macro kernel_entry_setup
#ifdef CONFIG_SMP
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
beqz t0, 1f
# CPUs other than zero goto smp_bootstrap
j smp_bootstrap
#endif /* CONFIG_SMP */
1:
.endm

View file

@ -1164,7 +1164,9 @@ fpu_emul:
regs->regs[31] = r31;
regs->cp0_epc = epc;
if (!used_math()) { /* First time FPU user. */
preempt_disable();
err = init_fpu();
preempt_enable();
set_used_math();
}
lose_fpu(1); /* Save FPU state for the emulator. */
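The two added lines close a preemption window: init_fpu() allocates and then loads FPU state into the current CPU's registers, and a migration in between would leave the context live on the wrong CPU. The shape of the pattern, annotated with the hunk's own names:

if (!used_math()) {		/* first FPU use by this task */
	preempt_disable();	/* stay on this CPU ... */
	err = init_fpu();	/* ... while its FPU registers are written */
	preempt_enable();
	set_used_math();
}
lose_fpu(1);			/* then hand the state to the emulator */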

View file

@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
return -EOPNOTSUPP;
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
return -EOPNOTSUPP;
if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
return -EOPNOTSUPP;
/* FR = 0 not supported in MIPS R6 */
if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
/* Proceed with the mode switch */

View file

@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@ -181,10 +184,6 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.

View file

@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
static void __init init_vdso_image(struct mips_vdso_image *image)
{
unsigned long num_pages, i;
unsigned long data_pfn;
BUG_ON(!PAGE_ALIGNED(image->data));
BUG_ON(!PAGE_ALIGNED(image->size));
num_pages = image->size / PAGE_SIZE;
for (i = 0; i < num_pages; i++) {
image->mapping.pages[i] =
virt_to_page(image->data + (i * PAGE_SIZE));
}
data_pfn = __phys_to_pfn(__pa_symbol(image->data));
for (i = 0; i < num_pages; i++)
image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
static int __init init_vdso(void)
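The rewrite above avoids virt_to_page() because image->data is a kernel-image symbol, and the image is not guaranteed to sit in the linear mapping that virt_to_page() assumes on MIPS. Going through the physical address is valid either way; condensed, with the hunk's names:

/* symbol -> phys -> pfn -> struct page, valid for image addresses */
data_pfn = __phys_to_pfn(__pa_symbol(image->data));
for (i = 0; i < num_pages; i++)
	image->mapping.pages[i] = pfn_to_page(data_pfn + i);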

View file

@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return blkcipher_walk_done(desc, walk, -EINVAL);
}
bsize = min(walk->walk_blocksize, n);
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);

View file

@ -1,8 +1,8 @@
/*
* echainiv: Encrypted Chain IV Generator
*
* This generator generates an IV based on a sequence number by xoring it
* with a salt and then encrypting it with the same key as used to encrypt
* This generator generates an IV based on a sequence number by multiplying
* it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC.
*
@ -23,81 +23,17 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#define MAX_IV_SIZE 16
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
u32 *a = (u32 *)dst;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
*a++ = this_cpu_read(*b);
b++;
}
}
static void echainiv_write_iv(const u8 *src, unsigned size)
{
const u32 *a = (const u32 *)src;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
this_cpu_write(*b, *a);
a++;
b++;
}
}
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
unsigned int ivsize;
if (err == -EINPROGRESS)
return;
if (err)
goto out;
geniv = crypto_aead_reqtfm(req);
ivsize = crypto_aead_ivsize(geniv);
echainiv_write_iv(subreq->iv, ivsize);
if (req->iv != subreq->iv)
memcpy(req->iv, subreq->iv, ivsize);
out:
if (req->iv != subreq->iv)
kzfree(subreq->iv);
}
static void echainiv_encrypt_complete(struct crypto_async_request *base,
int err)
{
struct aead_request *req = base->data;
echainiv_encrypt_complete2(req, err);
aead_request_complete(req, err);
}
static int echainiv_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
void *data;
__be64 nseqno;
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
int err;
@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req)
aead_request_set_tfm(subreq, ctx->child);
compl = echainiv_encrypt_complete;
data = req;
info = req->iv;
if (req->src != req->dst) {
@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req)
return err;
}
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
info = kmalloc(ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
GFP_ATOMIC);
if (!info)
return -ENOMEM;
memcpy(info, req->iv, ivsize);
}
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen);
crypto_xor(info, ctx->salt, ivsize);
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
echainiv_read_iv(info, ivsize);
memcpy(&nseqno, info + ivsize - 8, 8);
seqno = be64_to_cpu(nseqno);
memset(info, 0, ivsize);
err = crypto_aead_encrypt(subreq);
echainiv_encrypt_complete2(req, err);
return err;
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
do {
u64 a;
memcpy(&a, ctx->salt + ivsize - 8, 8);
a |= 1;
a *= seqno;
memcpy(info + ivsize - 8, &a, 8);
} while ((ivsize -= 8));
return crypto_aead_encrypt(subreq);
}
static int echainiv_decrypt(struct aead_request *req)
@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u32) - 1) ||
inst->alg.ivsize > MAX_IV_SIZE)
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
inst->alg.encrypt = echainiv_encrypt;
@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
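The new encrypt path above derives the IV from the sequence number instead of a racy per-CPU store: each 64-bit word of the IV becomes seqno multiplied by the matching salt word forced odd (an odd multiplier is invertible mod 2^64, so distinct sequence numbers never collide within a word). A self-contained sketch of just that construction, assuming ivsize is a non-zero multiple of 8 as the create-time check above now enforces; the function name is hypothetical:

static void echainiv_fill_iv(u8 *iv, const u8 *salt,
			     unsigned int ivsize, u64 seqno)
{
	memset(iv, 0, ivsize);
	do {
		u64 a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;			/* odd => bijective multiplier */
		a *= seqno;
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}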

View file

@ -59,9 +59,11 @@ static void
nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
struct nvkm_device *device = pm->engine.subdev.device;
if (pm->sequence != pm->sequence) {
struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
if (nv40pm->sequence != pm->sequence) {
nvkm_wr32(device, 0x400084, 0x00000020);
pm->sequence = pm->sequence;
nv40pm->sequence = pm->sequence;
}
}

View file

@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
* correctly globally, since that would require
* tracking all of our palettes. */
ret = qxl_bo_kmap(palette_bo, (void **)&pal);
if (ret)
return ret;
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {

View file

@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
pch_adap->dev.parent = &pdev->dev;
}
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
pch_i2c_init(&adap_info->pch_data[i]);
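The reordering above follows the usual probe rule: a shared IRQ may fire the moment request_irq() returns, so every field the handler dereferences has to be initialised first. Schematically, with hypothetical helper and handler names:

foo_init_channels(adap_info);		/* handler-visible state first (assumed helper) */

ret = request_irq(pdev->irq, foo_irq_handler, IRQF_SHARED,
		  KBUILD_MODNAME, adap_info);	/* the line may fire from here on */
if (ret)
	goto err_request_irq;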

View file

@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
#ifdef CONFIG_PM_SLEEP
static int qup_i2c_suspend(struct device *device)
{
qup_i2c_pm_suspend_runtime(device);
if (!pm_runtime_suspended(device))
return qup_i2c_pm_suspend_runtime(device);
return 0;
}

View file

@ -433,16 +433,15 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
scale_db = true;
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]),
-vals[1],
scale_db ? " dB" : "");
return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
-vals[1], scale_db ? " dB" : "");
else
return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
return sprintf(buf, "-%ld.%09u\n", abs(vals[0]),
-vals[1]);
return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
-vals[1]);
else
return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:

View file

@ -1858,10 +1858,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
/*
* All PCI devices managed by this unit should have been destroyed.
*/
if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
for_each_active_dev_scope(dmaru->devices,
dmaru->devices_cnt, i, dev)
return -EBUSY;
}
ret = dmar_ir_hotplug(dmaru, false);
if (ret == 0)

View file

@ -4182,10 +4182,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
if (!atsru)
return 0;
if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
i, dev)
return -EBUSY;
}
return 0;
}

View file

@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
sdinfo = &cfg->sub_devs[i];
client = v4l2_get_subdevdata(sdinfo->sd);
if (client->addr == curr_client->addr &&
client->adapter->nr == client->adapter->nr) {
client->adapter->nr == curr_client->adapter->nr) {
if (vpfe->current_input >= 1)
return -1;
*app_input_index = j + vpfe->current_input;

View file

@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
if (!msp_flash)
return -ENOMEM;
msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
if (!msp_parts)
goto free_msp_flash;
msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
if (!msp_maps)
goto free_msp_parts;

View file

@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
if (info->mtd == NULL)
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
}
}
info->mtd->dev.parent = &pdev->dev;

View file

@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
netdev_err(bond_dev,
"Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
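netdev_is_rx_handler_busy() (exported by the netdevice.h hunk further down) is the reliable "already claimed" test: it catches a device grabbed by a bridge, macvlan or another bond, not just one flagged IFF_SLAVE. A sketch of the intended use ahead of registering bonding's own handler, with names as in bond_enslave():

if (netdev_is_rx_handler_busy(slave_dev))
	return -EBUSY;		/* rx_handler slot already taken */

res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
				 new_slave);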

View file

@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
err = flexcan_chip_disable(priv);
if (err)
return err;
if (netif_running(dev)) {
err = flexcan_chip_disable(priv);
if (err)
return err;
netif_stop_queue(dev);
netif_device_detach(dev);
}
@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
err = flexcan_chip_enable(priv);
if (err)
return err;
}
return flexcan_chip_enable(priv);
return 0;
}
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);

View file

@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
u32 mask) \
{ \
intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
priv->irq##which##_mask &= ~(mask); \
intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
u32 mask) \

View file

@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
static u8 xor8_buf(void *buf, int len)
static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
u8 sum = 0;
int i;
int end = len + offset;
for (i = 0; i < len; i++)
for (i = offset; i < end; i++)
sum ^= ptr[i];
return sum;
@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
int xor_len = sizeof(*block) - sizeof(block->data) - 1;
if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
return -EINVAL;
if (xor8_buf(block, sizeof(*block)) != 0xff)
if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
return -EINVAL;
return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
int csum)
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
block->token = token;
if (csum) {
block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
sizeof(block->data) - 2);
block->sig = ~xor8_buf(block, sizeof(*block) - 1);
}
int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
int size = msg->len;
int blen = size - min_t(int, sizeof(msg->first.data), size);
int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
/ MLX5_CMD_DATA_BLOCK_SIZE;
int i = 0;
while (next) {
calc_block_sig(next->buf, token, csum);
for (i = 0; i < n && next; i++) {
calc_block_sig(next->buf);
next = next->next;
}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
calc_chain_sig(ent->in, ent->token, csum);
calc_chain_sig(ent->out, ent->token, csum);
ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (csum) {
calc_chain_sig(ent->in);
calc_chain_sig(ent->out);
}
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd_mailbox *next = ent->out->next;
int err;
u8 sig;
int size = ent->out->len;
int blen = size - min_t(int, sizeof(ent->out->first.data), size);
int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
/ MLX5_CMD_DATA_BLOCK_SIZE;
int i = 0;
sig = xor8_buf(ent->lay, sizeof(*ent->lay));
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
return -EINVAL;
while (next) {
for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
return err;
@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
ent->token = alloc_token(cmd);
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
void *context, int page_queue, u8 *status)
void *context, int page_queue, u8 *status,
u8 token)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (IS_ERR(ent))
return PTR_ERR(ent);
ent->token = token;
if (!callback)
init_completion(&ent->done);
@ -844,7 +860,8 @@ static const struct file_operations fops = {
.write = dbg_write,
};
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
u8 token)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_mailbox *next;
@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
memcpy(block->data, from, copy);
from += copy;
size -= copy;
block->token = token;
next = next->next;
}
@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
gfp_t flags, int size)
gfp_t flags, int size,
u8 token)
{
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
tmp->next = head;
block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
block->block_num = cpu_to_be32(n - i - 1);
block->token = token;
head = tmp;
}
msg->next = head;
@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
}
if (IS_ERR(msg))
msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
u8 token;
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
return err;
}
err = mlx5_copy_to_msg(inb, in, in_size);
token = alloc_token(&dev->cmd);
err = mlx5_copy_to_msg(inb, in, in_size, token);
if (err) {
mlx5_core_warn(dev, "err %d\n", err);
goto out_in;
}
outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, &status);
pages_queue, &status, token);
if (err)
goto out_out;
@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&cmd->cache.med.head);
for (i = 0; i < NUM_LONG_LISTS; i++) {
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
}
for (i = 0; i < NUM_MED_LISTS; i++) {
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
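The mlx5 changes above re-plumb an XOR-8 integrity byte: the stored signature is the complement of the XOR of the covered bytes, so re-XORing an intact region, signature included, yields 0xff. A standalone sketch of the primitive and the two identities the driver relies on:

static u8 xor8(const u8 *p, size_t off, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = off; i < off + len; i++)
		sum ^= p[i];
	return sum;
}

/* sign:   sig = ~xor8(buf, 0, size - 1);	(last byte holds sig) */
/* verify: xor8(buf, 0, size) == 0xff		for an intact buffer  */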

View file

@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
dev_err(&pdev->dev,
"at least one of 8-bit or 16-bit access support is required.\n");
ret = -ENXIO;
goto out_free_netdev;
}
}
#if IS_BUILTIN(CONFIG_OF)

View file

@ -36,6 +36,27 @@
#include <linux/dmaengine.h>
#include <linux/smc91x.h>
/*
* Any 16-bit access is performed with two 8-bit accesses if the hardware
* can't do it directly. Most registers are 16-bit so those are mandatory.
*/
#define SMC_outw_b(x, a, r) \
do { \
unsigned int __val16 = (x); \
unsigned int __reg = (r); \
SMC_outb(__val16, a, __reg); \
SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
} while (0)
#define SMC_inw_b(a, r) \
({ \
unsigned int __val16; \
unsigned int __reg = r; \
__val16 = SMC_inb(a, __reg); \
__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
__val16; \
})
/*
* Define your architecture specific bus configuration parameters here.
*/
@ -55,10 +76,30 @@
#define SMC_IO_SHIFT (lp->io_shift)
#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_inw(a, r) \
({ \
unsigned int __smc_r = r; \
SMC_16BIT(lp) ? readw((a) + __smc_r) : \
SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
({ BUG(); 0; }); \
})
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(v, a, r) \
do { \
unsigned int __v = v, __smc_r = r; \
if (SMC_16BIT(lp)) \
__SMC_outw(__v, a, __smc_r); \
else if (SMC_8BIT(lp)) \
SMC_outw_b(__v, a, __smc_r); \
else \
BUG(); \
} while (0)
#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@ -66,7 +107,7 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
if ((machine_is_mainstone() || machine_is_stargate2() ||
machine_is_pxa_idp()) && reg & 2) {
@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT
/*
* Any 16-bit access is performed with two 8-bit accesses if the hardware
* can't do it directly. Most registers are 16-bit so those are mandatory.
*/
#define SMC_outw(x, ioaddr, reg) \
do { \
unsigned int __val16 = (x); \
SMC_outb( __val16, ioaddr, reg ); \
SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
} while (0)
#define SMC_inw(ioaddr, reg) \
({ \
unsigned int __val16; \
__val16 = SMC_inb( ioaddr, reg ); \
__val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
__val16; \
})
#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
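SMC_inw_b()/SMC_outw_b() above synthesise a 16-bit access from two 8-bit ones on the low and high byte lanes. Expanded into a plain function for clarity (little-endian lane order, lane spacing via the header's SMC_IO_SHIFT):

static u16 smc_inw_via_bytes(void __iomem *base, unsigned int reg)
{
	u16 val;

	val  = readb(base + reg);				/* low lane  */
	val |= readb(base + reg + (1 << SMC_IO_SHIFT)) << 8;	/* high lane */
	return val;
}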

View file

@ -640,8 +640,10 @@ phy_err:
int phy_start_interrupts(struct phy_device *phydev)
{
atomic_set(&phydev->irq_disable, 0);
if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
phydev) < 0) {
if (request_irq(phydev->irq, phy_interrupt,
IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;

View file

@ -869,8 +869,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

View file

@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
int txq_id;
/* Tx queues */
if (il->txq)
if (il->txq) {
for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
if (txq_id == IL39_CMD_QUEUE_NUM)
il_cmd_queue_free(il);
else
il_tx_queue_free(il, txq_id);
}
/* free tx queue structure */
il_free_txq_mem(il);

View file

@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g),
(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
(s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
/*

View file

@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
}
static inline void max17042_read_model_data(struct max17042_chip *chip,
u8 addr, u32 *data, int size)
u8 addr, u16 *data, int size)
{
struct regmap *map = chip->regmap;
int i;
u32 tmp;
for (i = 0; i < size; i++)
regmap_read(map, addr + i, &data[i]);
for (i = 0; i < size; i++) {
regmap_read(map, addr + i, &tmp);
data[i] = (u16)tmp;
}
}
static inline int max17042_model_data_compare(struct max17042_chip *chip,
@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u32 *temp_data;
u16 *temp_data;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
ret = max17042_model_data_compare(
chip,
chip->pdata->config_data->cell_char_tbl,
(u16 *)temp_data,
temp_data,
table_size);
max10742_lock_model(chip);
@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u32 *temp_data;
u16 *temp_data;
int ret = 0;
temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);

View file

@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
pr_err("failed to find reboot-offset property\n");
iounmap(base);
return -EINVAL;
}
err = register_restart_handler(&hisi_restart_nb);
if (err)
if (err) {
dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
err);
iounmap(base);
}
return err;
}

View file

@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
if (!charger)
return -ENOMEM;
platform_set_drvdata(pdev, charger);
charger->tps = tps;
charger->dev = &pdev->dev;

View file

@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
*/
bool pwm_can_sleep(struct pwm_device *pwm)
{
return pwm->chip->can_sleep;
return true;
}
EXPORT_SYMBOL_GPL(pwm_can_sleep);

View file

@ -5941,11 +5941,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (fusion->ld_drv_map[i])
free_pages((ulong)fusion->ld_drv_map[i],
fusion->drv_map_pages);
if (fusion->pd_seq_sync)
dma_free_coherent(&instance->pdev->dev,
pd_seq_map_sz,
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
if (fusion->pd_seq_sync[i])
dma_free_coherent(&instance->pdev->dev,
pd_seq_map_sz,
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
free_pages((ulong)instance->ctrl_context,
instance->ctrl_context_pages);

View file

@ -236,7 +236,7 @@ static int ad7192_setup(struct ad7192_state *st,
st->mclk = pdata->ext_clk_hz;
else
st->mclk = AD7192_INT_FREQ_MHZ;
break;
break;
default:
ret = -EINVAL;
goto out;

View file

@ -79,9 +79,13 @@ struct autofs_info {
};
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
* for expiry, so RCU_walk is
* not permitted
* not permitted. If it progresses to
* actual expiry attempt, the flag is
* not cleared when EXPIRING is set -
* in that case it gets cleared only
* when it comes to clearing EXPIRING.
*/
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */

View file

@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
if (ino->flags & AUTOFS_INF_PENDING)
goto out;
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
ino->flags |= AUTOFS_INF_NO_RCU;
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
ino->flags |= AUTOFS_INF_EXPIRING;
smp_mb();
ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
}
ino->flags &= ~AUTOFS_INF_NO_RCU;
ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
}
out:
spin_unlock(&sbi->fs_lock);
@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
}
return NULL;
}
/*
* Find an eligible tree to time-out
* A tree is eligible if :-
@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
struct dentry *root = sb->s_root;
struct dentry *dentry;
struct dentry *expired;
struct dentry *found;
struct autofs_info *ino;
if (!root)
@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
dentry = NULL;
while ((dentry = get_next_positive_subdir(dentry, root))) {
int flags = how;
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
if (ino->flags & AUTOFS_INF_NO_RCU)
expired = NULL;
else
expired = should_expire(dentry, mnt, timeout, how);
if (!expired) {
if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
spin_unlock(&sbi->fs_lock);
continue;
}
spin_unlock(&sbi->fs_lock);
expired = should_expire(dentry, mnt, timeout, flags);
if (!expired)
continue;
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(expired);
ino->flags |= AUTOFS_INF_NO_RCU;
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
spin_lock(&sbi->fs_lock);
if (should_expire(expired, mnt, timeout, how)) {
if (expired != dentry)
dput(dentry);
goto found;
}
ino->flags &= ~AUTOFS_INF_NO_RCU;
/* Make sure a reference is not taken on found if
* things have changed.
*/
flags &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
if (!found || found != expired)
/* Something has changed, continue */
goto next;
if (expired != dentry)
dput(dentry);
spin_lock(&sbi->fs_lock);
goto found;
next:
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
if (expired != dentry)
dput(expired);
spin_unlock(&sbi->fs_lock);
}
return NULL;
found:
DPRINTK("returning %p %pd", expired, expired);
ino->flags |= AUTOFS_INF_EXPIRING;
smp_mb();
ino->flags &= ~AUTOFS_INF_NO_RCU;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&sbi->lookup_lock);
spin_lock(&expired->d_parent->d_lock);
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&expired->d_parent->d_subdirs, &expired->d_child);
spin_unlock(&expired->d_lock);
spin_unlock(&expired->d_parent->d_lock);
spin_unlock(&sbi->lookup_lock);
return expired;
}
@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
int status;
int state;
/* Block on any pending expire */
if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
return 0;
if (rcu_walk)
return -ECHILD;
retry:
spin_lock(&sbi->fs_lock);
if (ino->flags & AUTOFS_INF_EXPIRING) {
state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
if (state == AUTOFS_INF_WANT_EXPIRE) {
spin_unlock(&sbi->fs_lock);
/*
* Possibly being selected for expire, wait until
* it's selected or not.
*/
schedule_timeout_uninterruptible(HZ/10);
goto retry;
}
if (state & AUTOFS_INF_EXPIRING) {
spin_unlock(&sbi->fs_lock);
DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
ino = autofs4_dentry_ino(dentry);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
ino->flags &= ~AUTOFS_INF_EXPIRING;
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
spin_lock(&sbi->fs_lock);
/* avoid rapid-fire expire attempts if expiry fails */
ino->last_used = now;
ino->flags &= ~AUTOFS_INF_EXPIRING;
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);

View file

@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
* a mount-trap.
*/
struct inode *inode;
if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
return 0;
if (d_mountpoint(dentry))
return 0;

View file

@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
int namelen;
int ret = 0;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
ret = mnt_want_write_file(file);
if (ret)
goto out;
@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
struct btrfs_ioctl_vol_args *vol_args;
int ret;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
bool readonly = false;
struct btrfs_qgroup_inherit *inherit = NULL;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
int ret;
int err = 0;
if (!S_ISDIR(dir->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);

View file

@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
if (S_ISLNK(root_inode->i_mode)) {
char *name = follow_link(host_root_path);
if (IS_ERR(name))
if (IS_ERR(name)) {
err = PTR_ERR(name);
else
err = read_name(root_inode, name);
goto out_put;
}
err = read_name(root_inode, name);
kfree(name);
if (err)
goto out_put;

View file

@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
wait_event(group->fanotify_data.access_waitq, event->response ||
atomic_read(&group->fanotify_data.bypass_perm));
if (!event->response) { /* bypass_perm set */
/*
* Event was canceled because group is being destroyed. Remove
* it from group's event list because we are responsible for
* freeing the permission event.
*/
fsnotify_remove_event(group, &event->fae.fse);
return 0;
}
wait_event(group->fanotify_data.access_waitq, event->response);
/* userspace responded, convert to something usable */
switch (event->response) {

View file

@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
struct fanotify_perm_event_info *event, *next;
struct fsnotify_event *fsn_event;
/*
* There may be still new events arriving in the notification queue
* but since userspace cannot use fanotify fd anymore, no event can
* enter or leave access_list by now.
* Stop new events from arriving in the notification queue. Since
* userspace cannot use fanotify fd anymore, no event can enter or
* leave access_list by now either.
*/
fsnotify_group_stop_queueing(group);
/*
* Process all permission events on access_list and notification queue
* and simulate reply from userspace.
*/
spin_lock(&group->fanotify_data.access_lock);
atomic_inc(&group->fanotify_data.bypass_perm);
list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
fae.fse.list) {
pr_debug("%s: found group=%p event=%p\n", __func__, group,
@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
spin_unlock(&group->fanotify_data.access_lock);
/*
* Since bypass_perm is set, newly queued events will not wait for
* access response. Wake up the already sleeping ones now.
* synchronize_srcu() in fsnotify_destroy_group() will wait for all
* processes sleeping in fanotify_handle_event() waiting for access
* response and thus also for all permission events to be freed.
* Destroy all non-permission events. For permission events just
* dequeue them and set the response. They will be freed once the
* response is consumed and fanotify_get_response() returns.
*/
mutex_lock(&group->notification_mutex);
while (!fsnotify_notify_queue_is_empty(group)) {
fsn_event = fsnotify_remove_first_event(group);
if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
fsnotify_destroy_event(group, fsn_event);
else
FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
}
mutex_unlock(&group->notification_mutex);
/* Response for all permission events is set, wake up waiters */
wake_up(&group->fanotify_data.access_waitq);
#endif
@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
spin_lock_init(&group->fanotify_data.access_lock);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
switch (flags & FAN_ALL_CLASS_BITS) {
case FAN_CLASS_NOTIF:

View file

@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
kfree(group);
}
/*
* Stop queueing new events for this group. Once this function returns
* fsnotify_add_event() will not add any new events to the group's queue.
*/
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
{
mutex_lock(&group->notification_mutex);
group->shutdown = true;
mutex_unlock(&group->notification_mutex);
}
/*
* Trying to get rid of a group. Remove all marks, flush all events and release
* the group reference.
@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
void fsnotify_destroy_group(struct fsnotify_group *group)
{
/*
* Stop queueing new events. The code below is careful enough to not
* require this but fanotify needs to stop queuing events even before
* fsnotify_destroy_group() is called and this makes the other callers
* of fsnotify_destroy_group() to see the same behavior.
*/
fsnotify_group_stop_queueing(group);
/* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);

View file

@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. The function returns 0 if the event was
* added to the queue, 1 if the event was merged with some other queued event,
* 2 if the queue of events has overflown.
* 2 if the event was not queued - either the queue of events has overflown
* or the group is shutting down.
*/
int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
mutex_lock(&group->notification_mutex);
if (group->shutdown) {
mutex_unlock(&group->notification_mutex);
return 2;
}
if (group->q_len >= group->max_events) {
ret = 2;
/* Queue overflow event only if it isn't already queued */
@ -125,21 +131,6 @@ queue:
return ret;
}
/*
* Remove @event from group's notification queue. It is the responsibility of
* the caller to destroy the event.
*/
void fsnotify_remove_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
mutex_lock(&group->notification_mutex);
if (!list_empty(&event->list)) {
list_del_init(&event->list);
group->q_len--;
}
mutex_unlock(&group->notification_mutex);
}
/*
* Remove and return the first event from the notification list. It is the
* responsibility of the caller to destroy the obtained event

View file

@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
u8 old_owner = res->owner;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->convert_pending = 0;
/* if it failed, move it back to granted queue.
* if master returns DLM_NORMAL and then down before sending ast,
* it may have already been moved to granted queue, reset to
@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
(old_owner != res->owner)) {
mlog(0, "res %.*s is in recovering or has been recovered.\n",
res->lockname.len, res->lockname.name);
} else if (!lock->convert_pending) {
mlog(0, "%s: res %.*s, owner died and lock has been moved back "
"to granted list, retry convert.\n",
dlm->name, res->lockname.len, res->lockname.name);
status = DLM_RECOVERING;
}
lock->convert_pending = 0;
bail:
spin_unlock(&res->spinlock);

View file

@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
int ret = 0;
u64 tmpend, end = start + len;
u64 tmpend = 0;
u64 end = start + len;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
}
/*
* We want to get the byte offset of the end of the 1st cluster.
* If start is on a cluster boundary and end is somewhere in another
* cluster, we have not COWed the cluster starting at start, unless
* end is also within the same cluster. So, in this case, we skip this
* first call to ocfs2_zero_range_for_truncate() and move on
* to the next one.
*/
tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
if (tmpend > end)
tmpend = end;
if ((start & (csize - 1)) != 0) {
/*
* We want to get the byte offset of the end of the 1st
* cluster.
*/
tmpend = (u64)osb->s_clustersize +
(start & ~(osb->s_clustersize - 1));
if (tmpend > end)
tmpend = end;
trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
(unsigned long long)tmpend);
trace_ocfs2_zero_partial_clusters_range1(
(unsigned long long)start,
(unsigned long long)tmpend);
ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
if (ret)
mlog_errno(ret);
ret = ocfs2_zero_range_for_truncate(inode, handle, start,
tmpend);
if (ret)
mlog_errno(ret);
}
if (tmpend < end) {
/*

View file

@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
insert_ptr);
}
memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
insert_ptr[0] = new_insert_ptr;
if (new_insert_ptr)
memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
return order;
}

View file

@ -1535,7 +1535,7 @@ xfs_wait_buftarg(
* ensure here that all reference counts have been dropped before we
* start walking the LRU list.
*/
drain_workqueue(btp->bt_mount->m_buf_workqueue);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {

View file

@ -148,6 +148,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@ -179,7 +180,6 @@ struct fsnotify_group {
spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* group destruction begins, stop queuing new events */
extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
/* Remove passed event from groups notification queue */
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */

View file

@ -202,26 +202,26 @@ extern int _cond_resched(void);
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first
* (s64, long or int depending on its size).
* @x: the value. If it is unsigned type, it is converted to signed type first.
* char is treated as if it was signed (regardless of whether it really is)
* but the macro's return type is preserved as char.
*
* Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
* otherwise it is signed long.
* Return: an absolute value of x.
*/
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
}), ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} else { \
int __x = (x); \
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
}))
#define abs(x) __abs_choose_expr(x, long long, \
__abs_choose_expr(x, long, \
__abs_choose_expr(x, int, \
__abs_choose_expr(x, short, \
__abs_choose_expr(x, char, \
__builtin_choose_expr( \
__builtin_types_compatible_p(typeof(x), char), \
(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
((void)0)))))))
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
__builtin_types_compatible_p(typeof(x), signed type) || \
__builtin_types_compatible_p(typeof(x), unsigned type), \
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
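The rework keeps abs() type-preserving down to short and char instead of widening everything to long. A usage sketch of the expected behaviour (values assumed for illustration):

short s = -7;
s64 big = -(1LL << 40);

BUILD_BUG_ON(sizeof(abs(s)) != sizeof(short));	/* argument type is preserved */
pr_info("%d %lld\n", abs(s), abs(big));		/* prints: 7 1099511627776 */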
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)

View file

@ -3036,6 +3036,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);

View file

@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return ret;
return 0;
if (unlikely(uaddr > end))
return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
while (uaddr <= end) {
ret = __put_user(0, uaddr);
if (ret != 0)
return ret;
do {
if (unlikely(__put_user(0, uaddr) != 0))
return -EFAULT;
uaddr += PAGE_SIZE;
}
} while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
ret = __put_user(0, end);
return __put_user(0, end);
return ret;
return 0;
}
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
return ret;
return 0;
while (uaddr <= end) {
ret = __get_user(c, uaddr);
if (ret != 0)
return ret;
if (unlikely(uaddr > end))
return -EFAULT;
do {
if (unlikely(__get_user(c, uaddr) != 0))
return -EFAULT;
uaddr += PAGE_SIZE;
}
} while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
ret = __get_user(c, end);
(void)c;
return __get_user(c, end);
}
return ret;
return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
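Both helpers now share the same shape: probe one byte per page across the range, then probe the final byte explicitly when the PAGE_SIZE stride stepped over its page, failing fast with -EFAULT on the first fault instead of threading a ret variable through. The readable variant, condensed:

const char __user *p = uaddr, *end = uaddr + size - 1;
volatile char c;

do {
	if (unlikely(__get_user(c, p) != 0))
		return -EFAULT;
	p += PAGE_SIZE;			/* one probe per page */
} while (p <= end);

/* stride overshot: probe end's page iff the loop never touched it */
if (((unsigned long)p & PAGE_MASK) == ((unsigned long)end & PAGE_MASK))
	return __get_user(c, end);
return 0;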

View file

@ -1,6 +1,16 @@
#ifndef __SMC91X_H__
#define __SMC91X_H__
/*
* These bits define which access sizes a platform can support, rather
* than the maximal access size. So, if your platform can do 16-bit
* and 32-bit accesses to the SMC91x device, but not 8-bit, set both
* SMC91X_USE_16BIT and SMC91X_USE_32BIT.
*
* The SMC91x driver requires at least one of SMC91X_USE_8BIT or
* SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
* an invalid configuration.
*/
#define SMC91X_USE_8BIT (1 << 0)
#define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2)
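For board code the rule now reads: declare every width the wiring can actually do, since 8-bit and 16-bit are what the driver uses to fall back and compose wider accesses. A hypothetical platform entry honouring the constraint (the smc_drv_probe() hunk earlier rejects configurations with neither 8-bit nor 16-bit set):

static struct smc91x_platdata board_smc91x_pdata = {
	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
		 SMC91X_NOWAIT,
};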

View file

@ -52,7 +52,7 @@ struct unix_sock {
struct sock sk;
struct unix_address *addr;
struct path path;
struct mutex readlock;
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
atomic_long_t inflight;

View file

@ -1510,6 +1510,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
{
if (sk->sk_send_head == skb_unlinked)
sk->sk_send_head = NULL;
if (tcp_sk(sk)->highest_sack == skb_unlinked)
tcp_sk(sk)->highest_sack = NULL;
}
static inline void tcp_init_send_head(struct sock *sk)

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
@@ -2079,7 +2079,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
-void cpuset_fork(struct task_struct *task)
+void cpuset_fork(struct task_struct *task, void *priv)
 {
 	if (task_css_is_root(task, cpuset_cgrp_id))
 		return;

diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
@@ -299,12 +299,12 @@ static int create_image(int platform_mode)
 	save_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
 	error = swsusp_arch_suspend();
+	/* Restore control flow magically appears here */
+	restore_processor_state();
+	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 			error);
-	/* Restore control flow magically appears here */
-	restore_processor_state();
-	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (!in_suspend)
 		events_check_enabled = false;

diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
@@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  */
 static bool rtree_next_node(struct memory_bitmap *bm)
 {
-	bm->cur.node = list_entry(bm->cur.node->list.next,
-				  struct rtree_node, list);
-	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
+		bm->cur.node = list_entry(bm->cur.node->list.next,
+					  struct rtree_node, list);
 		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 		bm->cur.node_bit  = 0;
 		touch_softlockup_watchdog();
@@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
 	}
 
 	/* No more nodes, goto next zone */
-	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
+		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 				  struct mem_zone_bm_rtree, list);
-	if (&bm->cur.zone->list != &bm->zones) {
 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 					  struct rtree_node, list);
 		bm->cur.node_pfn = 0;
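Editor's note: the rewrite advances the cursor only while list_is_last() says a further node exists, instead of stepping first and checking afterwards. For reference, the helper from include/linux/list.h is simply:

    static inline int list_is_last(const struct list_head *list,
                                   const struct list_head *head)
    {
            return list->next == head;
    }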

diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
@@ -1,4 +1,8 @@
+# We are fully aware of the dangers of __builtin_return_address()
+FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
+KBUILD_CFLAGS += $(FRAME_CFLAGS)
+
 # Do not instrument the tracer itself:
 
 ifdef CONFIG_FUNCTION_TRACER

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
@@ -4727,19 +4727,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	struct trace_iterator *iter = filp->private_data;
 	ssize_t sret;
 
-	/* return any leftover data */
-	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
-	if (sret != -EBUSY)
-		return sret;
-
-	trace_seq_init(&iter->seq);
-
 	/*
 	 * Avoid more than one consumer on a single file descriptor
 	 * This is just a matter of traces coherency, the ring buffer itself
 	 * is protected.
 	 */
 	mutex_lock(&iter->mutex);
+
+	/* return any leftover data */
+	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+	if (sret != -EBUSY)
+		goto out;
+
+	trace_seq_init(&iter->seq);
+
 	if (iter->trace->read) {
 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
 		if (sret)
@@ -5766,9 +5767,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		return -EBUSY;
 #endif
 
-	if (splice_grow_spd(pipe, &spd))
-		return -ENOMEM;
-
 	if (*ppos & (PAGE_SIZE - 1))
 		return -EINVAL;
@@ -5778,6 +5776,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		len &= PAGE_MASK;
 	}
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
  again:
 	trace_access_lock(iter->cpu_file);
 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -5835,19 +5836,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	/* did we read anything? */
 	if (!spd.nr_pages) {
 		if (ret)
-			return ret;
+			goto out;
 
+		ret = -EAGAIN;
 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
-			return -EAGAIN;
+			goto out;
 
 		ret = wait_on_pipe(iter, true);
 		if (ret)
-			return ret;
+			goto out;
 
 		goto again;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
+out:
 	splice_shrink_spd(&spd);
 	return ret;
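Editor's note: both hunks follow the same shape: do the cheap validation before allocating, then funnel every later exit through one label that releases the allocation. A minimal userspace sketch of that pattern (hypothetical names, errno values hard-coded for brevity):

    #include <stdlib.h>

    int process(size_t n)
    {
            int ret = 0;
            char *buf;

            if (n == 0 || n > (size_t)1 << 20)
                    return -22;     /* -EINVAL: reject before allocating */

            buf = malloc(n);
            if (!buf)
                    return -12;     /* -ENOMEM */

            if (n % 2) {
                    ret = -11;      /* -EAGAIN: no bare return past this point */
                    goto out;
            }
            /* ... use buf ... */
    out:
            free(buf);              /* every exit path releases the buffer */
            return ret;
    }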

diff --git a/mm/vmscan.c b/mm/vmscan.c
@@ -2159,23 +2159,6 @@ out:
 	}
 }
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void init_tlb_ubc(void)
-{
-	/*
-	 * This deliberately does not clear the cpumask as it's expensive
-	 * and unnecessary. If there happens to be data in there then the
-	 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
-	 * then will be cleared.
-	 */
-	current->tlb_ubc.flush_required = false;
-}
-#else
-static inline void init_tlb_ubc(void)
-{
-}
-#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
-
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
-	init_tlb_ubc();
-
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 				nr[LRU_INACTIVE_FILE]) {

diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
@@ -1113,7 +1113,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 		} else {
 			err = br_ip6_multicast_add_group(br, port,
 							 &grec->grec_mca, vid);
-			if (!err)
+			if (err)
 				break;
 		}
 	}

diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
@@ -286,7 +286,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 		else
 			skb_trim(skb, len);
-			return cfpkt_getlen(pkt);
+		return cfpkt_getlen(pkt);
 	}
 
 	/* Need to expand SKB */

diff --git a/net/core/dev.c b/net/core/dev.c
@@ -3721,6 +3721,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 	return skb;
 }
 
+/**
+ * netdev_is_rx_handler_busy - check if receive handler is registered
+ * @dev: device to check
+ *
+ * Check if a receive handler is already registered for a given device.
+ * Return true if there is one.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+bool netdev_is_rx_handler_busy(struct net_device *dev)
+{
+	ASSERT_RTNL();
+	return dev && rtnl_dereference(dev->rx_handler);
+}
+EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
+
 /**
  * netdev_rx_handler_register - register receive handler
  * @dev: device to register a handler for
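Editor's note: a hypothetical caller (sketch only; my_attach/my_rx_handler are placeholders) showing how the new predicate lets a stacking driver probe for a free slot instead of discovering the conflict from a failed registration:

    static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
    {
            return RX_HANDLER_PASS;         /* placeholder: let the stack continue */
    }

    static int my_attach(struct net_device *dev, void *my_priv)
    {
            ASSERT_RTNL();
            if (netdev_is_rx_handler_busy(dev))
                    return -EBUSY;          /* bridge, bond, etc. already own it */

            return netdev_rx_handler_register(dev, my_rx_handler, my_priv);
    }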

diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
@@ -2453,9 +2453,7 @@ struct fib_route_iter {
 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 					    loff_t pos)
 {
-	struct fib_table *tb = iter->main_tb;
 	struct key_vector *l, **tp = &iter->tnode;
-	struct trie *t;
 	t_key key;
 
 	/* use cache location of next-to-find key */
@@ -2463,8 +2461,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 		pos -= iter->pos;
 		key = iter->key;
 	} else {
-		t = (struct trie *)tb->tb_data;
-		iter->tnode = t->kv;
 		iter->pos = 0;
 		key = 0;
 	}
@@ -2505,12 +2501,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 		return NULL;
 
 	iter->main_tb = tb;
+	t = (struct trie *)tb->tb_data;
+	iter->tnode = t->kv;
 
 	if (*pos != 0)
 		return fib_route_get_idx(iter, *pos);
 
-	t = (struct trie *)tb->tb_data;
-	iter->tnode = t->kv;
 	iter->pos = 0;
 	iter->key = 0;

diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
@@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
 	.get_link_net	= ip_tunnel_get_link_net,
 };
 
+static bool is_vti_tunnel(const struct net_device *dev)
+{
+	return dev->netdev_ops == &vti_netdev_ops;
+}
+
+static int vti_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+
+	if (!is_vti_tunnel(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_DOWN:
+		if (!net_eq(tunnel->net, dev_net(dev)))
+			xfrm_garbage_collect(tunnel->net);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vti_notifier_block __read_mostly = {
+	.notifier_call = vti_device_event,
+};
+
 static int __init vti_init(void)
 {
 	const char *msg;
@@ -547,6 +574,8 @@ static int __init vti_init(void)
 
 	pr_info("IPv4 over IPsec tunneling driver\n");
 
+	register_netdevice_notifier(&vti_notifier_block);
+
 	msg = "tunnel device";
 	err = register_pernet_device(&vti_net_ops);
 	if (err < 0)
@@ -579,6 +608,7 @@ xfrm_proto_ah_failed:
 xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti_net_ops);
 pernet_dev_failed:
+	unregister_netdevice_notifier(&vti_notifier_block);
 	pr_err("vti init: failed to register %s\n", msg);
 	return err;
 }
@@ -590,6 +620,7 @@ static void __exit vti_fini(void)
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti_net_ops);
+	unregister_netdevice_notifier(&vti_notifier_block);
 }
 
 module_init(vti_init);

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
@@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
 					     tcp_sk(sk)->snd_nxt;
 
+	/* RFC 7323 2.3
+	 * The window field (SEG.WND) of every outgoing segment, with the
+	 * exception of <SYN> segments, MUST be right-shifted by
+	 * Rcv.Wind.Shift bits:
+	 */
 	tcp_v4_send_ack(sock_net(sk), skb, seq,
-			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+			tcp_rsk(req)->rcv_nxt,
+			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp,
 			req->ts_recent,
 			0,
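Editor's note: the arithmetic behind the fix, as a standalone sketch with assumed example values: SEG.WND is a 16-bit field, so once a window scale has been negotiated the advertised window must be right-shifted before it goes on the wire, and the peer shifts it back up:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rcv_wnd = 1024 * 1024;     /* 1 MiB receive window */
            unsigned int wscale  = 7;               /* scale negotiated on SYN (RFC 7323) */

            unsigned short seg_wnd = rcv_wnd >> wscale;     /* 8192: fits 16 bits */

            printf("on the wire: %u, peer reconstructs: %u\n",
                   seg_wnd, (unsigned int)seg_wnd << wscale);
            return 0;
    }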

diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
@@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 
 	else if (!yeah->doing_reno_now) {
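Editor's note: this is a semantic fix, not only a cleanup: the helper compares strictly, so a flow with snd_cwnd equal to snd_ssthresh now takes the congestion-avoidance branch rather than slow start. The helper, as defined in include/net/tcp.h:

    static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
    {
            return tp->snd_cwnd < tp->snd_ssthresh;
    }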

diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
@@ -150,8 +150,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	rt = (struct rt6_info *) dst;
 
 	np = inet6_sk(sk);
-	if (!np)
-		return -EBADF;
+	if (!np) {
+		err = -EBADF;
+		goto dst_err_out;
+	}
 
 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
 		fl6.flowi6_oif = np->mcast_oif;
@@ -186,6 +188,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	}
 	release_sock(sk);
 
+dst_err_out:
+	dst_release(dst);
+
 	if (err)
 		return err;

diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
@@ -932,9 +932,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
+	/* RFC 7323 2.3
+	 * The window field (SEG.WND) of every outgoing segment, with the
+	 * exception of <SYN> segments, MUST be right-shifted by
+	 * Rcv.Wind.Shift bits:
+	 */
 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+			tcp_rsk(req)->rcv_nxt,
+			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);

diff --git a/net/irda/iriap.c b/net/irda/iriap.c
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
 	self->magic = IAS_MAGIC;
 	self->mode = mode;
-	if (mode == IAS_CLIENT)
-		iriap_register_lsap(self, slsap_sel, mode);
+	if (mode == IAS_CLIENT) {
+		if (iriap_register_lsap(self, slsap_sel, mode)) {
+			kfree(self);
+			return NULL;
+		}
+	}
 
 	self->confirm = callback;
 	self->priv = priv;

diff --git a/net/tipc/socket.c b/net/tipc/socket.c
@@ -2111,7 +2111,8 @@ restart:
 					      TIPC_CONN_MSG, SHORT_H_SIZE,
 					      0, dnode, onode, dport, oport,
 					      TIPC_CONN_SHUTDOWN);
-		tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+		if (skb)
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 	}
 	tsk->connected = 0;
 	sock->state = SS_DISCONNECTING;

diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
 	struct unix_sock *u = unix_sk(sk);
 
-	if (mutex_lock_interruptible(&u->readlock))
+	if (mutex_lock_interruptible(&u->iolock))
 		return -EINTR;
 
 	sk->sk_peek_off = val;
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 
 	return 0;
 }
@@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	spin_lock_init(&u->lock);
 	atomic_long_set(&u->inflight, 0);
 	INIT_LIST_HEAD(&u->link);
-	mutex_init(&u->readlock); /* single task reading lock */
+	mutex_init(&u->iolock); /* single task reading lock */
+	mutex_init(&u->bindlock); /* single task binding lock */
 	init_waitqueue_head(&u->peer_wait);
 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
 	int err;
 	unsigned int retries = 0;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
 		return err;
@@ -894,7 +895,7 @@ retry:
 	spin_unlock(&unix_table_lock);
 	err = 0;
 
-out:	mutex_unlock(&u->readlock);
+out:	mutex_unlock(&u->bindlock);
 	return err;
 }
@@ -953,20 +954,32 @@ fail:
 	return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
-		      struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-	int err;
+	struct dentry *dentry;
+	struct path path;
+	int err = 0;
+
+	/*
+	 * Get the parent directory, calculate the hash for last
+	 * component.
+	 */
+	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+	err = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		return err;
 
-	err = security_path_mknod(path, dentry, mode, 0);
+	/*
+	 * All right, let's create it.
+	 */
+	err = security_path_mknod(&path, dentry, mode, 0);
 	if (!err) {
-		err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
 		if (!err) {
-			res->mnt = mntget(path->mnt);
+			res->mnt = mntget(path.mnt);
 			res->dentry = dget(dentry);
 		}
 	}
+	done_path_create(&path, dentry);
 	return err;
 }
@@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct unix_sock *u = unix_sk(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
-	int err, name_err;
+	int err;
 	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
-	struct path path;
-	struct dentry *dentry;
 
 	err = -EINVAL;
 	if (sunaddr->sun_family != AF_UNIX)
@@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
-	name_err = 0;
-	dentry = NULL;
-	if (sun_path[0]) {
-		/* Get the parent directory, calculate the hash for last
-		 * component.
-		 */
-		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-		if (IS_ERR(dentry)) {
-			/* delay report until after 'already bound' check */
-			name_err = PTR_ERR(dentry);
-			dentry = NULL;
-		}
-	}
-
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
-		goto out_path;
+		goto out;
 
 	err = -EINVAL;
 	if (u->addr)
 		goto out_up;
 
-	if (name_err) {
-		err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-		goto out_up;
-	}
-
 	err = -ENOMEM;
 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
 	if (!addr)
@@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	addr->hash = hash ^ sk->sk_type;
 	atomic_set(&addr->refcnt, 1);
 
-	if (dentry) {
-		struct path u_path;
+	if (sun_path[0]) {
+		struct path path;
 		umode_t mode = S_IFSOCK |
 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = unix_mknod(dentry, &path, mode, &u_path);
+		err = unix_mknod(sun_path, mode, &path);
 		if (err) {
 			if (err == -EEXIST)
 				err = -EADDRINUSE;
@@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			goto out_up;
 		}
 		addr->hash = UNIX_HASH_SIZE;
-		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+		hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
-		u->path = u_path;
+		u->path = path;
 		list = &unix_socket_table[hash];
 	} else {
 		spin_lock(&unix_table_lock);
@@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
-	mutex_unlock(&u->readlock);
-out_path:
-	if (dentry)
-		done_path_create(&path, dentry);
-
+	mutex_unlock(&u->bindlock);
 out:
 	return err;
 }
@@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	if (false) {
 alloc_skb:
 		unix_state_unlock(other);
-		mutex_unlock(&unix_sk(other)->readlock);
+		mutex_unlock(&unix_sk(other)->iolock);
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
 					      &err, 0);
 		if (!newskb)
 			goto err;
 	}
 
-	/* we must acquire readlock as we modify already present
+	/* we must acquire iolock as we modify already present
 	 * skbs in the sk_receive_queue and mess with skb->len
 	 */
-	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
 	if (err) {
 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
 		goto err;
@@ -2048,7 +2035,7 @@ alloc_skb:
 	}
 
 	unix_state_unlock(other);
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 
 	other->sk_data_ready(other);
 	scm_destroy(&scm);
@@ -2057,7 +2044,7 @@ alloc_skb:
 err_state_unlock:
 	unix_state_unlock(other);
 err_unlock:
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 err:
 	kfree_skb(newskb);
 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (flags&MSG_OOB)
 		goto out;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->iolock);
 	if (unlikely(err)) {
 		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
 		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
@@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 out_free:
 	skb_free_datagram(sk, skb);
 out_unlock:
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 out:
 	return err;
 }
@@ -2293,7 +2280,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 	/* Lock the socket to prevent queue disordering
 	 * while sleeps in memcpy_tomsg
 	 */
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	if (flags & MSG_PEEK)
 		skip = sk_peek_offset(sk, flags);
@@ -2334,7 +2321,7 @@ again:
 			break;
 		}
 
-		mutex_unlock(&u->readlock);
+		mutex_unlock(&u->iolock);
 
 		timeo = unix_stream_data_wait(sk, timeo, last,
 					      last_len);
@@ -2345,7 +2332,7 @@ again:
 			goto out;
 		}
 
-		mutex_lock(&u->readlock);
+		mutex_lock(&u->iolock);
 		continue;
 unlock:
 		unix_state_unlock(sk);
@@ -2448,7 +2435,7 @@ unlock:
 		}
 	} while (size);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	if (state->msg)
 		scm_recv(sock, state->msg, &scm, flags);
 	else
@@ -2489,9 +2476,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
 	int ret;
 	struct unix_sock *u = unix_sk(sk);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	ret = splice_to_pipe(pipe, spd);
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	return ret;
 }
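Editor's note: the invariant the reworked unix_mknod() restores, sketched in isolation: a successful kern_path_create() returns with the parent directory locked and references held, so every subsequent outcome must end in done_path_create(). A fragment (error handling elided, sun_path assumed valid):

    struct path parent;
    struct dentry *dentry;

    dentry = kern_path_create(AT_FDCWD, sun_path, &parent, 0);
    if (IS_ERR(dentry))
            return PTR_ERR(dentry);         /* nothing held on failure */

    /* ... security_path_mknod() and vfs_mknod() against parent ... */

    done_path_create(&parent, dentry);      /* always: unlock + drop refs */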

diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
@@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 		params.n_counter_offsets_presp = len / sizeof(u16);
 
 		if (rdev->wiphy.max_num_csa_counters &&
-		    (params.n_counter_offsets_beacon >
+		    (params.n_counter_offsets_presp >
 		     rdev->wiphy.max_num_csa_counters))
 			return -EINVAL;