This is the 4.4.23 stable release
-----BEGIN PGP SIGNATURE-----

iQIcBAABCAAGBQJX7iBiAAoJEDjbvchgkmk+aIQQAIAZ97gsrZInLRZaLJCMS6Me
4zZRry3pUDtrLkBglerFiKrJTG/mFzasJxyyHuvNU++C9Nu8GdIkslnZ6/g+BO4P
xaX4PLeM4nCq33f8R5QX5dfM8qaCwWEdD01xK17Agrfcw8nljomPu3B1o8HnaFhb
jZbmQ9I2yIpDivNorbHZAWZWV3fmk4brDbO/X60X6k4nn42ZSp5f2M2NlcirzR9/
to5ZVEY51nrShXCJcoaNEMMd/lxPrsv1j5rI+WYibDlOJ4RTEy/UK+yJFgZqAXL9
mou/A9D0p0uKAKH85s/5wpjvQ/7QsFRasW1HM1nEd8B3TqS2Xi9k+nYAD6S0HvE4
IPKwPTpV9J+7ZixWSE4lorpHKZhhla+DVP09ZEZwJQlrxs/sGPQw2EgdhY5Kid1J
Bd7dyxIUieF5sDFJnwYnsCGdtJaSaKW/Kpscz5q70bI2h8SugZYdIBpJdHTTe5cX
vvfy+JaChpdcTLTgWh4XvgtWsabS2W2hFH6uBIkhy9hjhiflotBEG6WFOo3vrEC/
lTqRx9AphBb9fW8hIIKNhI8gKEsAF7xzZ7/YHounGrBXCiJTbiogyysvNHkebHfd
LbWtwMTSYrNNsu4ixiobofGu29PEDQW/i/emkUlF9jbKIL09bSGRaGet/qQOY6EJ
WoHyxZAZCT+3xGuvhTcj
=5gCn
-----END PGP SIGNATURE-----

Merge tag 'v4.4.23' into android-4.4.y

This is the 4.4.23 stable release

commit 09f6247a9c
83 changed files with 623 additions and 512 deletions

Makefile | 25
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 22
+SUBLEVEL = 23
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -128,6 +128,10 @@ _all:
 # Cancel implicit rules on top Makefile
 $(CURDIR)/Makefile Makefile: ;
 
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+$(error main directory cannot contain spaces nor colons)
+endif
+
 ifneq ($(KBUILD_OUTPUT),)
 # Invoke a second make in the output directory, passing relevant variables
 # check that the output directory actually exists
@@ -495,6 +499,12 @@ ifeq ($(KBUILD_EXTMOD),)
 endif
 endif
 endif
+# install and module_install need also be processed one by one
+ifneq ($(filter install,$(MAKECMDGOALS)),)
+ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
+    mixed-targets := 1
+endif
+endif
 
 ifeq ($(mixed-targets),1)
 # ===========================================================================
@@ -606,12 +616,17 @@ ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
+KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os
+else
+ifdef CONFIG_PROFILE_ALL_BRANCHES
+KBUILD_CFLAGS += -O2
 else
 KBUILD_CFLAGS += -O2
 endif
+endif
 
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -1260,7 +1275,7 @@ help:
 	@echo  '  firmware_install- Install all firmware to INSTALL_FW_PATH'
 	@echo  '                    (default: $$(INSTALL_MOD_PATH)/lib/firmware)'
 	@echo  '  dir/            - Build all files in dir and below'
-	@echo  '  dir/file.[oisS] - Build specified target only'
+	@echo  '  dir/file.[ois]  - Build specified target only'
 	@echo  '  dir/file.lst    - Build specified mixed source/assembly target only'
 	@echo  '                    (requires a recent binutils and recent build (System.map))'
 	@echo  '  dir/file.ko     - Build module including final link'
@@ -1500,11 +1515,11 @@ image_name:
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
 	$(Q)mkdir -p $(objtree)/tools
-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/
 
 tools/%: FORCE
 	$(Q)mkdir -p $(objtree)/tools
-	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(shell cd $(objtree) && /bin/pwd) subdir=tools -C $(src)/tools/ $*
 
 # Single targets
 # ---------------------------------------------------------------------------

@@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % AES_BLOCK_SIZE);
    }
-   if (nbytes) {
+   if (walk.nbytes % AES_BLOCK_SIZE) {
        u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 __aligned(8) tail[AES_BLOCK_SIZE];

@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-   .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+   .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+            SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-   .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+   .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+            SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {

@@ -95,7 +95,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-   .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+   .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+            SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {

@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-   .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+   .flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {

@@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % AES_BLOCK_SIZE);
    }
-   if (nbytes) {
+   if (walk.nbytes % AES_BLOCK_SIZE) {
        u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 __aligned(8) tail[AES_BLOCK_SIZE];

@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-   .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+   .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+            SMC91X_NOWAIT,
    .leda = RPC_LED_100_10,
    .ledb = RPC_LED_TX_RX,
 };

@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-   .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+   .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+            SMC91X_NOWAIT,
    .leda = RPC_LED_100_10,
    .ledb = RPC_LED_TX_RX,
 };

@@ -113,42 +113,6 @@ config SPINLOCK_TEST
    help
      Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-   prompt "Compact branch policy"
-   default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-   bool "Never (force delay slot branches)"
-   help
-     Pass the -mcompact-branches=never flag to the compiler in order to
-     force it to always emit branches with delay slots, and make no use
-     of the compact branch instructions introduced by MIPSr6. This is
-     useful if you suspect there may be an issue with compact branches in
-     either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-   bool "Optimal (use where beneficial)"
-   help
-     Pass the -mcompact-branches=optimal flag to the compiler in order for
-     it to make use of compact branch instructions where it deems them
-     beneficial, and use branches with delay slots elsewhere. This is the
-     default compiler behaviour, and should be used unless you have a
-     reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-   bool "Always (force compact branches)"
-   help
-     Pass the -mcompact-branches=always flag to the compiler in order to
-     force it to always emit compact branches, making no use of branch
-     instructions with delay slots. This can result in more compact code
-     which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
    bool "L2 cache debugfs entries"
    depends on DEBUG_FS

@@ -204,10 +204,6 @@ toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(
 cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
 endif
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
-
 #
 # Firmware support
 #

@@ -135,6 +135,7 @@
    ldc1    $f28, THREAD_FPR28(\thread)
    ldc1    $f30, THREAD_FPR30(\thread)
    ctc1    \tmp, fcr31
+   .set    pop
    .endm
 
    .macro  fpu_restore_16odd thread

@@ -11,11 +11,13 @@
 #define CP0_EBASE $15, 1
 
    .macro  kernel_entry_setup
+#ifdef CONFIG_SMP
    mfc0    t0, CP0_EBASE
    andi    t0, t0, 0x3ff       # CPUNum
    beqz    t0, 1f
    # CPUs other than zero goto smp_bootstrap
    j   smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
    .endm

@@ -1164,7 +1164,9 @@ fpu_emul:
    regs->regs[31] = r31;
    regs->cp0_epc = epc;
    if (!used_math()) {     /* First time FPU user. */
+       preempt_disable();
        err = init_fpu();
+       preempt_enable();
        set_used_math();
    }
    lose_fpu(1);    /* Save FPU state for the emulator. */

@@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
        return -EOPNOTSUPP;
 
    /* Avoid inadvertently triggering emulation */
-   if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-       !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+   if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+       !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
        return -EOPNOTSUPP;
-   if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+   if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
        return -EOPNOTSUPP;
 
    /* FR = 0 not supported in MIPS R6 */
-   if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+   if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
        return -EOPNOTSUPP;
 
    /* Proceed with the mode switch */

@@ -174,6 +174,9 @@ asmlinkage void start_secondary(void)
    cpumask_set_cpu(cpu, &cpu_coherent_mask);
    notify_cpu_starting(cpu);
 
+   cpumask_set_cpu(cpu, &cpu_callin_map);
+   synchronise_count_slave(cpu);
+
    set_cpu_online(cpu, true);
 
    set_cpu_sibling_map(cpu);
@@ -181,10 +184,6 @@
 
    calculate_cpu_foreign_map();
 
-   cpumask_set_cpu(cpu, &cpu_callin_map);
-
-   synchronise_count_slave(cpu);
-
    /*
     * irq will be enabled in ->smp_finish(), enabling it too early
    * is dangerous.

@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
    unsigned long num_pages, i;
+   unsigned long data_pfn;
 
    BUG_ON(!PAGE_ALIGNED(image->data));
    BUG_ON(!PAGE_ALIGNED(image->size));
 
    num_pages = image->size / PAGE_SIZE;
 
-   for (i = 0; i < num_pages; i++) {
-       image->mapping.pages[i] =
-           virt_to_page(image->data + (i * PAGE_SIZE));
-   }
+   data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+   for (i = 0; i < num_pages; i++)
+       image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)

@@ -234,6 +234,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
        return blkcipher_walk_done(desc, walk, -EINVAL);
    }
 
+   bsize = min(walk->walk_blocksize, n);
+
    walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
             BLKCIPHER_WALK_DIFF);
    if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -246,7 +248,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
        }
    }
 
-   bsize = min(walk->walk_blocksize, n);
    n = scatterwalk_clamp(&walk->in, n);
    n = scatterwalk_clamp(&walk->out, n);
 

@@ -1,8 +1,8 @@
 /*
  * echainiv: Encrypted Chain IV Generator
  *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
  * the plain text.  This algorithm requires that the block size be equal
  * to the IV size.  It is mainly useful for CBC.
  *
@@ -23,81 +23,17 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
-   u32 *a = (u32 *)dst;
-   u32 __percpu *b = echainiv_iv;
-
-   for (; size >= 4; size -= 4) {
-       *a++ = this_cpu_read(*b);
-       b++;
-   }
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
-   const u32 *a = (const u32 *)src;
-   u32 __percpu *b = echainiv_iv;
-
-   for (; size >= 4; size -= 4) {
-       this_cpu_write(*b, *a);
-       a++;
-       b++;
-   }
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
-   struct aead_request *subreq = aead_request_ctx(req);
-   struct crypto_aead *geniv;
-   unsigned int ivsize;
-
-   if (err == -EINPROGRESS)
-       return;
-
-   if (err)
-       goto out;
-
-   geniv = crypto_aead_reqtfm(req);
-   ivsize = crypto_aead_ivsize(geniv);
-
-   echainiv_write_iv(subreq->iv, ivsize);
-
-   if (req->iv != subreq->iv)
-       memcpy(req->iv, subreq->iv, ivsize);
-
-out:
-   if (req->iv != subreq->iv)
-       kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
-                     int err)
-{
-   struct aead_request *req = base->data;
-
-   echainiv_encrypt_complete2(req, err);
-   aead_request_complete(req, err);
-}
-
 static int echainiv_encrypt(struct aead_request *req)
 {
    struct crypto_aead *geniv = crypto_aead_reqtfm(req);
    struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
    struct aead_request *subreq = aead_request_ctx(req);
-   crypto_completion_t compl;
-   void *data;
+   __be64 nseqno;
+   u64 seqno;
    u8 *info;
    unsigned int ivsize = crypto_aead_ivsize(geniv);
    int err;
@@ -107,8 +43,6 @@ static int echainiv_encrypt(struct aead_request *req)
 
    aead_request_set_tfm(subreq, ctx->child);
 
-   compl = echainiv_encrypt_complete;
-   data = req;
    info = req->iv;
 
    if (req->src != req->dst) {
@@ -123,29 +57,30 @@ static int echainiv_encrypt(struct aead_request *req)
            return err;
    }
 
-   if (unlikely(!IS_ALIGNED((unsigned long)info,
-                crypto_aead_alignmask(geniv) + 1))) {
-       info = kmalloc(ivsize, req->base.flags &
-                      CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-                                 GFP_ATOMIC);
-       if (!info)
-           return -ENOMEM;
-
-       memcpy(info, req->iv, ivsize);
-   }
-
-   aead_request_set_callback(subreq, req->base.flags, compl, data);
+   aead_request_set_callback(subreq, req->base.flags,
+                 req->base.complete, req->base.data);
    aead_request_set_crypt(subreq, req->dst, req->dst,
                   req->cryptlen, info);
    aead_request_set_ad(subreq, req->assoclen);
 
-   crypto_xor(info, ctx->salt, ivsize);
+   memcpy(&nseqno, info + ivsize - 8, 8);
+   seqno = be64_to_cpu(nseqno);
+   memset(info, 0, ivsize);
+
    scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
-   echainiv_read_iv(info, ivsize);
 
-   err = crypto_aead_encrypt(subreq);
-   echainiv_encrypt_complete2(req, err);
-   return err;
+   do {
+       u64 a;
+
+       memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+       a |= 1;
+       a *= seqno;
+
+       memcpy(info + ivsize - 8, &a, 8);
+   } while ((ivsize -= 8));
+
+   return crypto_aead_encrypt(subreq);
 }
 
 static int echainiv_decrypt(struct aead_request *req)
@@ -192,8 +127,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
    alg = crypto_spawn_aead_alg(spawn);
 
    err = -EINVAL;
-   if (inst->alg.ivsize & (sizeof(u32) - 1) ||
-       inst->alg.ivsize > MAX_IV_SIZE)
+   if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
        goto free_inst;
 
    inst->alg.encrypt = echainiv_encrypt;
@@ -202,7 +136,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
    inst->alg.init = aead_init_geniv;
    inst->alg.exit = aead_exit_geniv;
 
-   inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
    inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
    inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 

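The echainiv rewrite above replaces the per-CPU chained IV with a purely arithmetic derivation: each 8-byte word of the IV becomes the 64-bit sequence number multiplied by the corresponding salt word forced odd. A minimal userspace sketch of just that arithmetic follows (the function name, example salt and output are illustrative; the kernel's byte-order handling and AEAD plumbing are elided):

    /* Sketch of the multiplicative IV derivation, assuming ivsize is a
     * non-zero multiple of 8, as the patched algorithm requires. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void derive_iv(uint8_t *iv, const uint8_t *salt,
                          unsigned ivsize, uint64_t seqno)
    {
        memset(iv, 0, ivsize);
        do {
            uint64_t a;

            memcpy(&a, salt + ivsize - 8, 8);
            a |= 1;          /* odd multiplier: invertible modulo 2^64 */
            a *= seqno;      /* unsigned wrap-around is well defined */
            memcpy(iv + ivsize - 8, &a, 8);
        } while ((ivsize -= 8));
    }

    int main(void)
    {
        uint8_t salt[16] = { 0xde, 0xad, 0xbe, 0xef };  /* example salt */
        uint8_t iv[16];
        unsigned i;

        derive_iv(iv, salt, sizeof(iv), 42);
        for (i = 0; i < sizeof(iv); i++)
            printf("%02x", iv[i]);
        putchar('\n');
        return 0;
    }

Because the multiplier is forced odd, the mapping from sequence number to IV word is a bijection, so distinct sequence numbers never collide.
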
@@ -59,9 +59,11 @@ static void
 nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
    struct nvkm_device *device = pm->engine.subdev.device;
-   if (pm->sequence != pm->sequence) {
+   struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
+
+   if (nv40pm->sequence != pm->sequence) {
        nvkm_wr32(device, 0x400084, 0x00000020);
-       pm->sequence = pm->sequence;
+       nv40pm->sequence = pm->sequence;
    }
 }
 

@@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
     * correctly globaly, since that would require
     * tracking all of our palettes. */
    ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+   if (ret)
+       return ret;
    pal->num_ents = 2;
    pal->unique = unique++;
    if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {

@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
    /* Set the number of I2C channel instance */
    adap_info->ch_num = id->driver_data;
 
-   ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-         KBUILD_MODNAME, adap_info);
-   if (ret) {
-       pch_pci_err(pdev, "request_irq FAILED\n");
-       goto err_request_irq;
-   }
-
    for (i = 0; i < adap_info->ch_num; i++) {
        pch_adap = &adap_info->pch_data[i].pch_adapter;
        adap_info->pch_i2c_suspended = false;
@@ -796,6 +789,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
        adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
 
        pch_adap->dev.parent = &pdev->dev;
+   }
+
+   ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+             KBUILD_MODNAME, adap_info);
+   if (ret) {
+       pch_pci_err(pdev, "request_irq FAILED\n");
+       goto err_request_irq;
+   }
+
+   for (i = 0; i < adap_info->ch_num; i++) {
+       pch_adap = &adap_info->pch_data[i].pch_adapter;
 
        pch_i2c_init(&adap_info->pch_data[i]);

@@ -727,7 +727,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-   qup_i2c_pm_suspend_runtime(device);
+   if (!pm_runtime_suspended(device))
+       return qup_i2c_pm_suspend_runtime(device);
    return 0;
 }
 

@@ -433,15 +433,14 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
        scale_db = true;
    case IIO_VAL_INT_PLUS_MICRO:
        if (vals[1] < 0)
-           return sprintf(buf, "-%ld.%06u%s\n", abs(vals[0]),
-                      -vals[1],
-                      scale_db ? " dB" : "");
+           return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
+                      -vals[1], scale_db ? " dB" : "");
        else
            return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
                scale_db ? " dB" : "");
    case IIO_VAL_INT_PLUS_NANO:
        if (vals[1] < 0)
-           return sprintf(buf, "-%ld.%09u\n", abs(vals[0]),
+           return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
                       -vals[1]);
        else
            return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);

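The iio_format_value() hunk above is a printf format/argument repair: vals[0] is an int and abs() returns int, so the old "%ld" conversion was a mismatch on targets where long is wider than int. A standalone sketch of the corrected negative fixed-point path (the helper name and sample values are made up for the demo):

    #include <stdio.h>
    #include <stdlib.h>

    /* Format val0.val1 where val1 holds microunits; val1 < 0 marks a
     * negative fraction, mirroring the fixed IIO code path. */
    static int format_val(char *buf, int val0, int val1, int scale_db)
    {
        if (val1 < 0)
            return sprintf(buf, "-%d.%06u%s\n", abs(val0), -val1,
                           scale_db ? " dB" : "");
        return sprintf(buf, "%d.%06u%s\n", val0, val1,
                       scale_db ? " dB" : "");
    }

    int main(void)
    {
        char buf[64];

        format_val(buf, 0, -500000, 0);   /* -0.5 -> "-0.500000" */
        fputs(buf, stdout);
        return 0;
    }
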
@@ -1858,10 +1858,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
    /*
     * All PCI devices managed by this unit should have been destroyed.
     */
-   if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+   if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
        for_each_active_dev_scope(dmaru->devices,
                      dmaru->devices_cnt, i, dev)
            return -EBUSY;
+   }
 
    ret = dmar_ir_hotplug(dmaru, false);
    if (ret == 0)

@@ -4182,10 +4182,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
    if (!atsru)
        return 0;
 
-   if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+   if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
        for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
                      i, dev)
            return -EBUSY;
+   }
 
    return 0;
 }

@@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
        sdinfo = &cfg->sub_devs[i];
        client = v4l2_get_subdevdata(sdinfo->sd);
        if (client->addr == curr_client->addr &&
-           client->adapter->nr == client->adapter->nr) {
+           client->adapter->nr == curr_client->adapter->nr) {
            if (vpfe->current_input >= 1)
                return -1;
            *app_input_index = j + vpfe->current_input;

@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
 
    printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
 
-   msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
+   msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
    if (!msp_flash)
        return -ENOMEM;
 
-   msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
+   msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
    if (!msp_parts)
        goto free_msp_flash;
 
-   msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
+   msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
    if (!msp_maps)
        goto free_msp_parts;
 

@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
 
        info->mtd = mtd_concat_create(cdev, info->num_subdev,
                          plat->name);
-       if (info->mtd == NULL)
+       if (info->mtd == NULL) {
            ret = -ENXIO;
+           goto err;
+       }
    }
    info->mtd->dev.parent = &pdev->dev;
 

@@ -1317,9 +1317,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
               slave_dev->name);
    }
 
-   /* already enslaved */
-   if (slave_dev->flags & IFF_SLAVE) {
-       netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+   /* already in-use? */
+   if (netdev_is_rx_handler_busy(slave_dev)) {
+       netdev_err(bond_dev,
+              "Error: Device is in use and cannot be enslaved\n");
        return -EBUSY;
    }
 

@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
    struct flexcan_priv *priv = netdev_priv(dev);
    int err;
 
-   err = flexcan_chip_disable(priv);
-   if (err)
-       return err;
-
    if (netif_running(dev)) {
+       err = flexcan_chip_disable(priv);
+       if (err)
+           return err;
+
        netif_stop_queue(dev);
        netif_device_detach(dev);
    }
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
 {
    struct net_device *dev = dev_get_drvdata(device);
    struct flexcan_priv *priv = netdev_priv(dev);
+   int err;
 
    priv->can.state = CAN_STATE_ERROR_ACTIVE;
    if (netif_running(dev)) {
        netif_device_attach(dev);
        netif_start_queue(dev);
+       err = flexcan_chip_enable(priv);
+       if (err)
+           return err;
    }
-   return flexcan_chip_enable(priv);
+   return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);

@@ -187,8 +187,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
 static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
                        u32 mask)       \
 {                                  \
-   intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
    priv->irq##which##_mask &= ~(mask);             \
+   intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
 }                                  \
 static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
                        u32 mask)       \

@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
    return cmd->cmd_buf + (idx << cmd->log_stride);
 }
 
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
 {
    u8 *ptr = buf;
    u8 sum = 0;
    int i;
+   int end = len + offset;
 
-   for (i = 0; i < len; i++)
+   for (i = offset; i < end; i++)
        sum ^= ptr[i];
 
    return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
 
 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 {
-   if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+   size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+   int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+   if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
        return -EINVAL;
 
-   if (xor8_buf(block, sizeof(*block)) != 0xff)
+   if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
        return -EINVAL;
 
    return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
-              int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 {
-   block->token = token;
-   if (csum) {
-       block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
-                       sizeof(block->data) - 2);
-       block->sig = ~xor8_buf(block, sizeof(*block) - 1);
-   }
+   int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+   size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+   block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+   block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 {
    struct mlx5_cmd_mailbox *next = msg->next;
+   int size = msg->len;
+   int blen = size - min_t(int, sizeof(msg->first.data), size);
+   int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+       / MLX5_CMD_DATA_BLOCK_SIZE;
+   int i = 0;
 
-   while (next) {
-       calc_block_sig(next->buf, token, csum);
+   for (i = 0; i < n && next; i++) {
+       calc_block_sig(next->buf);
        next = next->next;
    }
 }
 
 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
-   ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-   calc_chain_sig(ent->in, ent->token, csum);
-   calc_chain_sig(ent->out, ent->token, csum);
+   ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+   if (csum) {
+       calc_chain_sig(ent->in);
+       calc_chain_sig(ent->out);
+   }
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
    struct mlx5_cmd_mailbox *next = ent->out->next;
    int err;
    u8 sig;
+   int size = ent->out->len;
+   int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+   int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+       / MLX5_CMD_DATA_BLOCK_SIZE;
+   int i = 0;
 
-   sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+   sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
    if (sig != 0xff)
        return -EINVAL;
 
-   while (next) {
+   for (i = 0; i < n && next; i++) {
        err = verify_block_sig(next->buf);
        if (err)
            return err;
@@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
    }
 
-   ent->token = alloc_token(cmd);
    cmd->ent_arr[ent->idx] = ent;
    lay = get_inst(cmd, ent->idx);
    ent->lay = lay;
@@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
               struct mlx5_cmd_msg *out, void *uout, int uout_size,
               mlx5_cmd_cbk_t callback,
-              void *context, int page_queue, u8 *status)
+              void *context, int page_queue, u8 *status,
+              u8 token)
 {
    struct mlx5_cmd *cmd = &dev->cmd;
    struct mlx5_cmd_work_ent *ent;
@@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
    if (IS_ERR(ent))
        return PTR_ERR(ent);
 
+   ent->token = token;
+
    if (!callback)
        init_completion(&ent->done);
 
@@ -844,7 +860,8 @@ static const struct file_operations fops = {
    .write  = dbg_write,
 };
 
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+               u8 token)
 {
    struct mlx5_cmd_prot_block *block;
    struct mlx5_cmd_mailbox *next;
@@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
        memcpy(block->data, from, copy);
        from += copy;
        size -= copy;
+       block->token = token;
        next = next->next;
    }
 
@@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
 }
 
 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-                          gfp_t flags, int size)
+                          gfp_t flags, int size,
+                          u8 token)
 {
    struct mlx5_cmd_mailbox *tmp, *head = NULL;
    struct mlx5_cmd_prot_block *block;
@@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
        tmp->next = head;
        block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
        block->block_num = cpu_to_be32(n - i - 1);
+       block->token = token;
        head = tmp;
    }
    msg->next = head;
@@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
    }
 
    if (IS_ERR(msg))
-       msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+       msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 
    return msg;
 }
@@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
    int err;
    u8 status = 0;
    u32 drv_synd;
+   u8 token;
 
    if (pci_channel_offline(dev->pdev) ||
        dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        return err;
    }
 
-   err = mlx5_copy_to_msg(inb, in, in_size);
+   token = alloc_token(&dev->cmd);
+
+   err = mlx5_copy_to_msg(inb, in, in_size, token);
    if (err) {
        mlx5_core_warn(dev, "err %d\n", err);
        goto out_in;
    }
 
-   outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+   outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
    if (IS_ERR(outb)) {
        err = PTR_ERR(outb);
        goto out_in;
    }
 
    err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-                 pages_queue, &status);
+                 pages_queue, &status, token);
    if (err)
        goto out_out;
 
@@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
    INIT_LIST_HEAD(&cmd->cache.med.head);
 
    for (i = 0; i < NUM_LONG_LISTS; i++) {
-       msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+       msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
        if (IS_ERR(msg)) {
            err = PTR_ERR(msg);
            goto ex_err;
@@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
    }
 
    for (i = 0; i < NUM_MED_LISTS; i++) {
-       msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+       msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
        if (IS_ERR(msg)) {
            err = PTR_ERR(msg);
            goto ex_err;

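The mlx5 command-interface hunks above rework xor8_buf() to take the base pointer plus an explicit byte offset instead of a pointer into the middle of the block, and bound the mailbox-chain walks by the message length. A userspace sketch of the offset-based XOR signature scheme (the toy struct is a stand-in, not the real mlx5_cmd_prot_block layout):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* XOR all bytes in [offset, offset + len) of buf, as the patched
     * helper does. */
    static uint8_t xor8_buf(void *buf, size_t offset, int len)
    {
        uint8_t *ptr = buf;
        uint8_t sum = 0;
        int end = len + offset;
        int i;

        for (i = offset; i < end; i++)
            sum ^= ptr[i];
        return sum;
    }

    /* toy command block: a trailer of signatures follows the payload */
    struct toy_block {
        uint8_t data[8];
        uint8_t rsvd0[2];
        uint8_t ctrl_sig;
        uint8_t sig;
    };

    int main(void)
    {
        struct toy_block b = { .data = {1, 2, 3, 4, 5, 6, 7, 8} };

        /* ctrl_sig covers the trailer before itself; sig covers all but
         * itself, so the whole block XORs to 0xff once signed. */
        b.ctrl_sig = ~xor8_buf(&b, offsetof(struct toy_block, rsvd0),
                               sizeof(b) - sizeof(b.data) - 2);
        b.sig = ~xor8_buf(&b, 0, sizeof(b) - 1);
        printf("verify: %s\n",
               xor8_buf(&b, 0, sizeof(b)) == 0xff ? "ok" : "bad");
        return 0;
    }

Passing base-plus-offset keeps every access inside the one object the caller named, which is easier to reason about (and to bounds-check) than a pointer computed into the struct's interior.
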
@@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
    if (pd) {
        memcpy(&lp->cfg, pd, sizeof(lp->cfg));
        lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+       if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+           dev_err(&pdev->dev,
+               "at least one of 8-bit or 16-bit access support is required.\n");
+           ret = -ENXIO;
+           goto out_free_netdev;
+       }
    }
 
 #if IS_BUILTIN(CONFIG_OF)

@@ -36,6 +36,27 @@
 #include <linux/dmaengine.h>
 #include <linux/smc91x.h>
 
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r)                        \
+   do {                                \
+       unsigned int __val16 = (x);             \
+       unsigned int __reg = (r);               \
+       SMC_outb(__val16, a, __reg);                \
+       SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+   } while (0)
+
+#define SMC_inw_b(a, r)                            \
+   ({                              \
+       unsigned int __val16;                   \
+       unsigned int __reg = r;                 \
+       __val16 = SMC_inb(a, __reg);                \
+       __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+       __val16;                        \
+   })
+
 /*
  * Define your architecture specific bus configuration parameters here.
  */
@@ -55,10 +76,30 @@
 #define SMC_IO_SHIFT       (lp->io_shift)
 
 #define SMC_inb(a, r)      readb((a) + (r))
-#define SMC_inw(a, r)      readw((a) + (r))
+#define SMC_inw(a, r)                          \
+   ({                              \
+       unsigned int __smc_r = r;               \
+       SMC_16BIT(lp) ? readw((a) + __smc_r) :          \
+       SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :          \
+       ({ BUG(); 0; });                    \
+   })
+
 #define SMC_inl(a, r)      readl((a) + (r))
 #define SMC_outb(v, a, r)  writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)                      \
+   do {                                \
+       unsigned int __v = v, __smc_r = r;          \
+       if (SMC_16BIT(lp))                  \
+           __SMC_outw(__v, a, __smc_r);            \
+       else if (SMC_8BIT(lp))                  \
+           SMC_outw_b(__v, a, __smc_r);            \
+       else                            \
+           BUG();                      \
+   } while (0)
+
 #define SMC_outl(v, a, r)  writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l)   readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l)  writesb((a) + (r), p, l)
 #define SMC_insw(a, r, p, l)   readsw((a) + (r), p, l)
 #define SMC_outsw(a, r, p, l)  writesw((a) + (r), p, l)
 #define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
 #define SMC_IRQ_FLAGS      (-1)    /* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
    if ((machine_is_mainstone() || machine_is_stargate2() ||
         machine_is_pxa_idp()) && reg & 2) {
@@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 
 #if ! SMC_CAN_USE_16BIT
 
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg)                   \
-   do {                                \
-       unsigned int __val16 = (x);             \
-       SMC_outb( __val16, ioaddr, reg );           \
-       SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
-   } while (0)
-#define SMC_inw(ioaddr, reg)                       \
-   ({                              \
-       unsigned int __val16;                   \
-       __val16 =  SMC_inb( ioaddr, reg );          \
-       __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
-       __val16;                        \
-   })
+#define SMC_outw(x, ioaddr, reg)   SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg)       SMC_inw_b(ioaddr, reg)
 
 #define SMC_insw(a, r, p, l)       BUG()
 #define SMC_outsw(a, r, p, l)      BUG()

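The new SMC_outw_b()/SMC_inw_b() macros above synthesize one 16-bit register access from two 8-bit accesses, low byte first, for buses that can only do 8-bit cycles. A userspace model of that byte-lane split (the array-backed register window is invented for the demo, and SMC_IO_SHIFT is taken as 0):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t bus[4];   /* fake 8-bit MMIO register window */

    static void reg_outb(uint8_t v, unsigned reg) { bus[reg] = v; }
    static uint8_t reg_inb(unsigned reg)          { return bus[reg]; }

    /* 16-bit write as two 8-bit writes: low byte at reg, high at reg+1 */
    static void outw_b(uint16_t v, unsigned reg)
    {
        reg_outb(v & 0xff, reg);
        reg_outb(v >> 8, reg + 1);
    }

    /* 16-bit read assembled from two 8-bit reads */
    static uint16_t inw_b(unsigned reg)
    {
        return reg_inb(reg) | (uint16_t)(reg_inb(reg + 1) << 8);
    }

    int main(void)
    {
        outw_b(0xbeef, 0);
        printf("0x%04x\n", inw_b(0));   /* prints 0xbeef */
        return 0;
    }

Hoisting these helpers to the top of the header lets SMC_outw()/SMC_inw() pick 16-bit or split 8-bit cycles at runtime from the platform flags, instead of fixing the choice at compile time.
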
@@ -640,7 +640,9 @@ phy_err:
 int phy_start_interrupts(struct phy_device *phydev)
 {
    atomic_set(&phydev->irq_disable, 0);
-   if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+   if (request_irq(phydev->irq, phy_interrupt,
+           IRQF_SHARED,
+           "phy_interrupt",
            phydev) < 0) {
        pr_warn("%s: Can't get IRQ %d (PHY)\n",
            phydev->bus->name, phydev->irq);

@@ -1019,12 +1019,13 @@ il3945_hw_txq_ctx_free(struct il_priv *il)
    int txq_id;
 
    /* Tx queues */
-   if (il->txq)
+   if (il->txq) {
        for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
            if (txq_id == IL39_CMD_QUEUE_NUM)
                il_cmd_queue_free(il);
            else
                il_tx_queue_free(il, txq_id);
+   }
 
    /* free tx queue structure */
    il_free_txq_mem(il);

@@ -901,7 +901,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
        /* bound gain by 2 bits value max, 3rd bit is sign */
        data->delta_gain_code[i] =
            min(abs(delta_g),
-               (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+               (s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 
        if (delta_g < 0)
            /*

@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip,
 }
 
 static inline void max17042_read_model_data(struct max17042_chip *chip,
-                       u8 addr, u32 *data, int size)
+                       u8 addr, u16 *data, int size)
 {
    struct regmap *map = chip->regmap;
    int i;
+   u32 tmp;
 
-   for (i = 0; i < size; i++)
-       regmap_read(map, addr + i, &data[i]);
+   for (i = 0; i < size; i++) {
+       regmap_read(map, addr + i, &tmp);
+       data[i] = (u16)tmp;
+   }
 }
 
 static inline int max17042_model_data_compare(struct max17042_chip *chip,
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip)
 {
    int ret;
    int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-   u32 *temp_data;
+   u16 *temp_data;
 
    temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
    if (!temp_data)
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip)
    ret = max17042_model_data_compare(
        chip,
        chip->pdata->config_data->cell_char_tbl,
-       (u16 *)temp_data,
+       temp_data,
        table_size);
 
    max10742_lock_model(chip);
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip)
 {
    int i;
    int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
-   u32 *temp_data;
+   u16 *temp_data;
    int ret = 0;
 
    temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);

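The max17042 change above stops reading regmap values straight into what the rest of the driver treats as 16-bit model data: regmap_read() always fills a u32, so the patched code reads into a scratch u32 and narrows it explicitly. A userspace sketch of the corrected copy loop (the register table and the stand-in regmap_read() are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t regs[4] = { 0x1111, 0x2222, 0x3333, 0x4444 };

    /* stand-in for regmap_read(): always produces a 32-bit value */
    static int regmap_read(unsigned addr, uint32_t *val)
    {
        *val = regs[addr];
        return 0;
    }

    static void read_model_data(unsigned addr, uint16_t *data, int size)
    {
        uint32_t tmp;
        int i;

        for (i = 0; i < size; i++) {
            regmap_read(addr + i, &tmp);
            data[i] = (uint16_t)tmp;   /* explicit narrowing, as in the patch */
        }
    }

    int main(void)
    {
        uint16_t model[4];

        read_model_data(0, model, 4);
        printf("%04x %04x %04x %04x\n",
               model[0], model[1], model[2], model[3]);
        return 0;
    }

The old code passed a u16 slot's address where a u32 store would land, so every read clobbered the neighboring array entry; routing through a scratch variable keeps the stores in bounds.
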
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev)
 
    if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
        pr_err("failed to find reboot-offset property\n");
+       iounmap(base);
        return -EINVAL;
    }
 
    err = register_restart_handler(&hisi_restart_nb);
-   if (err)
+   if (err) {
        dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n",
            err);
+       iounmap(base);
+   }
 
    return err;
 }

@ -205,6 +205,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
|
||||||
if (!charger)
|
if (!charger)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
platform_set_drvdata(pdev, charger);
|
||||||
charger->tps = tps;
|
charger->tps = tps;
|
||||||
charger->dev = &pdev->dev;
|
charger->dev = &pdev->dev;
|
||||||
|
|
||||||
|
|
|
@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
|
||||||
*/
|
*/
|
||||||
bool pwm_can_sleep(struct pwm_device *pwm)
|
bool pwm_can_sleep(struct pwm_device *pwm)
|
||||||
{
|
{
|
||||||
return pwm->chip->can_sleep;
|
return true;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(pwm_can_sleep);
|
EXPORT_SYMBOL_GPL(pwm_can_sleep);
|
||||||
|
|
||||||
|
|
|
@ -5941,7 +5941,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
|
||||||
if (fusion->ld_drv_map[i])
|
if (fusion->ld_drv_map[i])
|
||||||
free_pages((ulong)fusion->ld_drv_map[i],
|
free_pages((ulong)fusion->ld_drv_map[i],
|
||||||
fusion->drv_map_pages);
|
fusion->drv_map_pages);
|
||||||
if (fusion->pd_seq_sync)
|
if (fusion->pd_seq_sync[i])
|
||||||
dma_free_coherent(&instance->pdev->dev,
|
dma_free_coherent(&instance->pdev->dev,
|
||||||
pd_seq_map_sz,
|
pd_seq_map_sz,
|
||||||
fusion->pd_seq_sync[i],
|
fusion->pd_seq_sync[i],
|
||||||
|
|
|
@ -79,9 +79,13 @@ struct autofs_info {
|
||||||
};
|
};
|
||||||
|
|
||||||
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
|
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
|
||||||
#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered
|
#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
|
||||||
* for expiry, so RCU_walk is
|
* for expiry, so RCU_walk is
|
||||||
* not permitted
|
* not permitted. If it progresses to
|
||||||
|
* actual expiry attempt, the flag is
|
||||||
|
* not cleared when EXPIRING is set -
|
||||||
|
* in that case it gets cleared only
|
||||||
|
* when it comes to clearing EXPIRING.
|
||||||
*/
|
*/
|
||||||
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
|
#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
|
||||||
|
|
||||||
|
|
|
@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
|
||||||
if (ino->flags & AUTOFS_INF_PENDING)
|
if (ino->flags & AUTOFS_INF_PENDING)
|
||||||
goto out;
|
goto out;
|
||||||
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
|
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
|
||||||
ino->flags |= AUTOFS_INF_NO_RCU;
|
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
spin_lock(&sbi->fs_lock);
|
spin_lock(&sbi->fs_lock);
|
||||||
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
|
if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
|
||||||
ino->flags |= AUTOFS_INF_EXPIRING;
|
ino->flags |= AUTOFS_INF_EXPIRING;
|
||||||
smp_mb();
|
|
||||||
ino->flags &= ~AUTOFS_INF_NO_RCU;
|
|
||||||
init_completion(&ino->expire_complete);
|
init_completion(&ino->expire_complete);
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
return root;
|
return root;
|
||||||
}
|
}
|
||||||
ino->flags &= ~AUTOFS_INF_NO_RCU;
|
ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
|
@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
|
||||||
}
|
}
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Find an eligible tree to time-out
|
* Find an eligible tree to time-out
|
||||||
* A tree is eligible if :-
|
* A tree is eligible if :-
|
||||||
|
@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
|
||||||
struct dentry *root = sb->s_root;
|
struct dentry *root = sb->s_root;
|
||||||
struct dentry *dentry;
|
struct dentry *dentry;
|
||||||
struct dentry *expired;
|
struct dentry *expired;
|
||||||
|
struct dentry *found;
|
||||||
struct autofs_info *ino;
|
struct autofs_info *ino;
|
||||||
|
|
||||||
if (!root)
|
if (!root)
|
||||||
|
@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
|
||||||
|
|
||||||
dentry = NULL;
|
dentry = NULL;
|
||||||
while ((dentry = get_next_positive_subdir(dentry, root))) {
|
while ((dentry = get_next_positive_subdir(dentry, root))) {
|
||||||
|
int flags = how;
|
||||||
|
|
||||||
spin_lock(&sbi->fs_lock);
|
spin_lock(&sbi->fs_lock);
|
||||||
ino = autofs4_dentry_ino(dentry);
|
ino = autofs4_dentry_ino(dentry);
|
||||||
if (ino->flags & AUTOFS_INF_NO_RCU)
|
if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
|
||||||
expired = NULL;
|
|
||||||
else
|
|
||||||
expired = should_expire(dentry, mnt, timeout, how);
|
|
||||||
if (!expired) {
|
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
spin_unlock(&sbi->fs_lock);
|
||||||
|
|
||||||
|
expired = should_expire(dentry, mnt, timeout, flags);
|
||||||
|
if (!expired)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
spin_lock(&sbi->fs_lock);
|
||||||
ino = autofs4_dentry_ino(expired);
|
ino = autofs4_dentry_ino(expired);
|
||||||
ino->flags |= AUTOFS_INF_NO_RCU;
|
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
spin_lock(&sbi->fs_lock);
|
|
||||||
if (should_expire(expired, mnt, timeout, how)) {
|
/* Make sure a reference is not taken on found if
|
||||||
|
* things have changed.
|
||||||
|
*/
|
||||||
|
flags &= ~AUTOFS_EXP_LEAVES;
|
||||||
|
found = should_expire(expired, mnt, timeout, how);
|
||||||
|
if (!found || found != expired)
|
||||||
|
/* Something has changed, continue */
|
||||||
|
goto next;
|
||||||
|
|
||||||
if (expired != dentry)
|
if (expired != dentry)
|
||||||
dput(dentry);
|
dput(dentry);
|
||||||
goto found;
|
|
||||||
}
|
|
||||||
|
|
||||||
ino->flags &= ~AUTOFS_INF_NO_RCU;
|
spin_lock(&sbi->fs_lock);
|
||||||
|
goto found;
|
||||||
|
next:
|
||||||
|
spin_lock(&sbi->fs_lock);
|
||||||
|
ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
|
||||||
|
spin_unlock(&sbi->fs_lock);
|
||||||
if (expired != dentry)
|
if (expired != dentry)
|
||||||
dput(expired);
|
dput(expired);
|
||||||
spin_unlock(&sbi->fs_lock);
|
|
||||||
}
|
}
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
found:
|
found:
|
||||||
DPRINTK("returning %p %pd", expired, expired);
|
DPRINTK("returning %p %pd", expired, expired);
|
||||||
ino->flags |= AUTOFS_INF_EXPIRING;
|
ino->flags |= AUTOFS_INF_EXPIRING;
|
||||||
smp_mb();
|
|
||||||
ino->flags &= ~AUTOFS_INF_NO_RCU;
|
|
||||||
init_completion(&ino->expire_complete);
|
init_completion(&ino->expire_complete);
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
spin_lock(&sbi->lookup_lock);
|
|
||||||
spin_lock(&expired->d_parent->d_lock);
|
|
||||||
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
|
|
||||||
list_move(&expired->d_parent->d_subdirs, &expired->d_child);
|
|
||||||
spin_unlock(&expired->d_lock);
|
|
||||||
spin_unlock(&expired->d_parent->d_lock);
|
|
||||||
spin_unlock(&sbi->lookup_lock);
|
|
||||||
return expired;
|
return expired;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
|
||||||
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
|
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
|
||||||
struct autofs_info *ino = autofs4_dentry_ino(dentry);
|
struct autofs_info *ino = autofs4_dentry_ino(dentry);
|
||||||
int status;
|
int status;
|
||||||
|
int state;
|
||||||
|
|
||||||
/* Block on any pending expire */
|
/* Block on any pending expire */
|
||||||
if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
|
if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
|
||||||
return 0;
|
return 0;
|
||||||
if (rcu_walk)
|
if (rcu_walk)
|
||||||
return -ECHILD;
|
return -ECHILD;
|
||||||
|
|
||||||
|
retry:
|
||||||
spin_lock(&sbi->fs_lock);
|
spin_lock(&sbi->fs_lock);
|
||||||
if (ino->flags & AUTOFS_INF_EXPIRING) {
|
state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
|
||||||
|
if (state == AUTOFS_INF_WANT_EXPIRE) {
|
||||||
|
spin_unlock(&sbi->fs_lock);
|
||||||
|
/*
|
||||||
|
* Possibly being selected for expire, wait until
|
||||||
|
* it's selected or not.
|
||||||
|
*/
|
||||||
|
schedule_timeout_uninterruptible(HZ/10);
|
||||||
|
goto retry;
|
||||||
|
}
|
||||||
|
if (state & AUTOFS_INF_EXPIRING) {
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
|
|
||||||
DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
|
DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
|
||||||
|
@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
|
||||||
ino = autofs4_dentry_ino(dentry);
|
ino = autofs4_dentry_ino(dentry);
|
||||||
/* avoid rapid-fire expire attempts if expiry fails */
|
/* avoid rapid-fire expire attempts if expiry fails */
|
||||||
ino->last_used = now;
|
ino->last_used = now;
|
||||||
ino->flags &= ~AUTOFS_INF_EXPIRING;
|
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
|
||||||
complete_all(&ino->expire_complete);
|
complete_all(&ino->expire_complete);
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
|
|
||||||
|
@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
|
||||||
spin_lock(&sbi->fs_lock);
|
spin_lock(&sbi->fs_lock);
|
||||||
/* avoid rapid-fire expire attempts if expiry fails */
|
/* avoid rapid-fire expire attempts if expiry fails */
|
||||||
ino->last_used = now;
|
ino->last_used = now;
|
||||||
ino->flags &= ~AUTOFS_INF_EXPIRING;
|
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
|
||||||
complete_all(&ino->expire_complete);
|
complete_all(&ino->expire_complete);
|
||||||
spin_unlock(&sbi->fs_lock);
|
spin_unlock(&sbi->fs_lock);
|
||||||
dput(dentry);
|
dput(dentry);
|
||||||
|
|
|
@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
|
||||||
* a mount-trap.
|
* a mount-trap.
|
||||||
*/
|
*/
|
||||||
struct inode *inode;
|
struct inode *inode;
|
||||||
if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
|
if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
|
||||||
return 0;
|
return 0;
|
||||||
if (d_mountpoint(dentry))
|
if (d_mountpoint(dentry))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -1619,6 +1619,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
|
||||||
int namelen;
|
int namelen;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
|
if (!S_ISDIR(file_inode(file)->i_mode))
|
||||||
|
return -ENOTDIR;
|
||||||
|
|
||||||
ret = mnt_want_write_file(file);
|
ret = mnt_want_write_file(file);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -1676,6 +1679,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
|
||||||
struct btrfs_ioctl_vol_args *vol_args;
|
struct btrfs_ioctl_vol_args *vol_args;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!S_ISDIR(file_inode(file)->i_mode))
|
||||||
|
return -ENOTDIR;
|
||||||
|
|
||||||
vol_args = memdup_user(arg, sizeof(*vol_args));
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
||||||
if (IS_ERR(vol_args))
|
if (IS_ERR(vol_args))
|
||||||
return PTR_ERR(vol_args);
|
return PTR_ERR(vol_args);
|
||||||
|
@ -1699,6 +1705,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
|
||||||
bool readonly = false;
|
bool readonly = false;
|
||||||
struct btrfs_qgroup_inherit *inherit = NULL;
|
struct btrfs_qgroup_inherit *inherit = NULL;
|
||||||
|
|
||||||
|
if (!S_ISDIR(file_inode(file)->i_mode))
|
||||||
|
return -ENOTDIR;
|
||||||
|
|
||||||
vol_args = memdup_user(arg, sizeof(*vol_args));
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
||||||
if (IS_ERR(vol_args))
|
if (IS_ERR(vol_args))
|
||||||
return PTR_ERR(vol_args);
|
return PTR_ERR(vol_args);
|
||||||
|
@ -2345,6 +2354,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
|
||||||
int ret;
|
int ret;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
|
|
||||||
|
if (!S_ISDIR(dir->i_mode))
|
||||||
|
return -ENOTDIR;
|
||||||
|
|
||||||
vol_args = memdup_user(arg, sizeof(*vol_args));
|
vol_args = memdup_user(arg, sizeof(*vol_args));
|
||||||
if (IS_ERR(vol_args))
|
if (IS_ERR(vol_args))
|
||||||
return PTR_ERR(vol_args);
|
return PTR_ERR(vol_args);
|
||||||
|
|
|
@ -959,9 +959,10 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
|
||||||
|
|
||||||
if (S_ISLNK(root_inode->i_mode)) {
|
if (S_ISLNK(root_inode->i_mode)) {
|
||||||
char *name = follow_link(host_root_path);
|
char *name = follow_link(host_root_path);
|
||||||
if (IS_ERR(name))
|
if (IS_ERR(name)) {
|
||||||
err = PTR_ERR(name);
|
err = PTR_ERR(name);
|
||||||
else
|
goto out_put;
|
||||||
|
}
|
||||||
err = read_name(root_inode, name);
|
err = read_name(root_inode, name);
|
||||||
kfree(name);
|
kfree(name);
|
||||||
if (err)
|
if (err)
|
||||||
|
|
|
@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
|
||||||
|
|
||||||
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
|
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
|
||||||
|
|
||||||
wait_event(group->fanotify_data.access_waitq, event->response ||
|
wait_event(group->fanotify_data.access_waitq, event->response);
|
||||||
atomic_read(&group->fanotify_data.bypass_perm));
|
|
||||||
|
|
||||||
if (!event->response) { /* bypass_perm set */
|
|
||||||
/*
|
|
||||||
* Event was canceled because group is being destroyed. Remove
|
|
||||||
* it from group's event list because we are responsible for
|
|
||||||
* freeing the permission event.
|
|
||||||
*/
|
|
||||||
fsnotify_remove_event(group, &event->fae.fse);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* userspace responded, convert to something usable */
|
/* userspace responded, convert to something usable */
|
||||||
switch (event->response) {
|
switch (event->response) {
|
||||||
|
|
|
@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
|
||||||
|
|
||||||
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
|
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
|
||||||
struct fanotify_perm_event_info *event, *next;
|
struct fanotify_perm_event_info *event, *next;
|
||||||
|
struct fsnotify_event *fsn_event;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There may be still new events arriving in the notification queue
|
* Stop new events from arriving in the notification queue. since
|
||||||
* but since userspace cannot use fanotify fd anymore, no event can
|
* userspace cannot use fanotify fd anymore, no event can enter or
|
||||||
* enter or leave access_list by now.
|
* leave access_list by now either.
|
||||||
|
*/
|
||||||
|
fsnotify_group_stop_queueing(group);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Process all permission events on access_list and notification queue
|
||||||
|
* and simulate reply from userspace.
|
||||||
*/
|
*/
|
||||||
spin_lock(&group->fanotify_data.access_lock);
|
spin_lock(&group->fanotify_data.access_lock);
|
||||||
|
|
||||||
atomic_inc(&group->fanotify_data.bypass_perm);
|
|
||||||
|
|
||||||
list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
|
list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
|
||||||
fae.fse.list) {
|
fae.fse.list) {
|
||||||
pr_debug("%s: found group=%p event=%p\n", __func__, group,
|
pr_debug("%s: found group=%p event=%p\n", __func__, group,
|
||||||
|
@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
|
||||||
spin_unlock(&group->fanotify_data.access_lock);
|
spin_unlock(&group->fanotify_data.access_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Since bypass_perm is set, newly queued events will not wait for
|
* Destroy all non-permission events. For permission events just
|
||||||
* access response. Wake up the already sleeping ones now.
|
* dequeue them and set the response. They will be freed once the
|
||||||
* synchronize_srcu() in fsnotify_destroy_group() will wait for all
|
* response is consumed and fanotify_get_response() returns.
|
||||||
* processes sleeping in fanotify_handle_event() waiting for access
|
|
||||||
* response and thus also for all permission events to be freed.
|
|
||||||
*/
|
*/
|
||||||
|
mutex_lock(&group->notification_mutex);
|
||||||
|
while (!fsnotify_notify_queue_is_empty(group)) {
|
||||||
|
fsn_event = fsnotify_remove_first_event(group);
|
||||||
|
if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
|
||||||
|
fsnotify_destroy_event(group, fsn_event);
|
||||||
|
else
|
||||||
|
FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
|
||||||
|
}
|
||||||
|
mutex_unlock(&group->notification_mutex);
|
||||||
|
|
||||||
|
/* Response for all permission events it set, wakeup waiters */
|
||||||
wake_up(&group->fanotify_data.access_waitq);
|
wake_up(&group->fanotify_data.access_waitq);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
|
||||||
spin_lock_init(&group->fanotify_data.access_lock);
|
spin_lock_init(&group->fanotify_data.access_lock);
|
||||||
init_waitqueue_head(&group->fanotify_data.access_waitq);
|
init_waitqueue_head(&group->fanotify_data.access_waitq);
|
||||||
INIT_LIST_HEAD(&group->fanotify_data.access_list);
|
INIT_LIST_HEAD(&group->fanotify_data.access_list);
|
||||||
atomic_set(&group->fanotify_data.bypass_perm, 0);
|
|
||||||
#endif
|
#endif
|
||||||
switch (flags & FAN_ALL_CLASS_BITS) {
|
switch (flags & FAN_ALL_CLASS_BITS) {
|
||||||
case FAN_CLASS_NOTIF:
|
case FAN_CLASS_NOTIF:
|
||||||
|
|
|
@ -39,6 +39,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
|
||||||
kfree(group);
|
kfree(group);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Stop queueing new events for this group. Once this function returns
|
||||||
|
* fsnotify_add_event() will not add any new events to the group's queue.
|
||||||
|
*/
|
||||||
|
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
|
||||||
|
{
|
||||||
|
mutex_lock(&group->notification_mutex);
|
||||||
|
group->shutdown = true;
|
||||||
|
mutex_unlock(&group->notification_mutex);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Trying to get rid of a group. Remove all marks, flush all events and release
|
* Trying to get rid of a group. Remove all marks, flush all events and release
|
||||||
* the group reference.
|
* the group reference.
|
||||||
|
@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
|
||||||
*/
|
*/
|
||||||
void fsnotify_destroy_group(struct fsnotify_group *group)
|
void fsnotify_destroy_group(struct fsnotify_group *group)
|
||||||
{
|
{
|
||||||
|
/*
|
||||||
|
* Stop queueing new events. The code below is careful enough to not
|
||||||
|
* require this but fanotify needs to stop queuing events even before
|
||||||
|
* fsnotify_destroy_group() is called and this makes the other callers
|
||||||
|
* of fsnotify_destroy_group() to see the same behavior.
|
||||||
|
*/
|
||||||
|
fsnotify_group_stop_queueing(group);
|
||||||
|
|
||||||
/* clear all inode marks for this group */
|
/* clear all inode marks for this group */
|
||||||
fsnotify_clear_marks_by_group(group);
|
fsnotify_clear_marks_by_group(group);
|
||||||
|
|
||||||
|
|
|
@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
|
||||||
* Add an event to the group notification queue. The group can later pull this
|
* Add an event to the group notification queue. The group can later pull this
|
||||||
* event off the queue to deal with. The function returns 0 if the event was
|
* event off the queue to deal with. The function returns 0 if the event was
|
||||||
* added to the queue, 1 if the event was merged with some other queued event,
|
* added to the queue, 1 if the event was merged with some other queued event,
|
||||||
* 2 if the queue of events has overflown.
|
* 2 if the event was not queued - either the queue of events has overflown
|
||||||
|
* or the group is shutting down.
|
||||||
*/
|
*/
|
||||||
int fsnotify_add_event(struct fsnotify_group *group,
|
int fsnotify_add_event(struct fsnotify_group *group,
|
||||||
struct fsnotify_event *event,
|
struct fsnotify_event *event,
|
||||||
|
@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
|
||||||
|
|
||||||
mutex_lock(&group->notification_mutex);
|
mutex_lock(&group->notification_mutex);
|
||||||
|
|
||||||
|
if (group->shutdown) {
|
||||||
|
mutex_unlock(&group->notification_mutex);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
|
||||||
if (group->q_len >= group->max_events) {
|
if (group->q_len >= group->max_events) {
|
||||||
ret = 2;
|
ret = 2;
|
||||||
/* Queue overflow event only if it isn't already queued */
|
/* Queue overflow event only if it isn't already queued */
|
||||||
|
@ -125,21 +131,6 @@ queue:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Remove @event from group's notification queue. It is the responsibility of
|
|
||||||
* the caller to destroy the event.
|
|
||||||
*/
|
|
||||||
void fsnotify_remove_event(struct fsnotify_group *group,
|
|
||||||
struct fsnotify_event *event)
|
|
||||||
{
|
|
||||||
mutex_lock(&group->notification_mutex);
|
|
||||||
if (!list_empty(&event->list)) {
|
|
||||||
list_del_init(&event->list);
|
|
||||||
group->q_len--;
|
|
||||||
}
|
|
||||||
mutex_unlock(&group->notification_mutex);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Remove and return the first event from the notification list. It is the
|
* Remove and return the first event from the notification list. It is the
|
||||||
* responsibility of the caller to destroy the obtained event
|
* responsibility of the caller to destroy the obtained event
|
||||||
|
|
|
@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
|
||||||
struct dlm_lock *lock, int flags, int type)
|
struct dlm_lock *lock, int flags, int type)
|
||||||
{
|
{
|
||||||
enum dlm_status status;
|
enum dlm_status status;
|
||||||
u8 old_owner = res->owner;
|
|
||||||
|
|
||||||
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
|
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
|
||||||
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
|
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
|
||||||
|
@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
|
||||||
|
|
||||||
spin_lock(&res->spinlock);
|
spin_lock(&res->spinlock);
|
||||||
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
|
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
|
||||||
lock->convert_pending = 0;
|
|
||||||
/* if it failed, move it back to granted queue.
|
/* if it failed, move it back to granted queue.
|
||||||
* if master returns DLM_NORMAL and then down before sending ast,
|
* if master returns DLM_NORMAL and then down before sending ast,
|
||||||
* it may have already been moved to granted queue, reset to
|
* it may have already been moved to granted queue, reset to
|
||||||
|
@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
|
||||||
if (status != DLM_NOTQUEUED)
|
if (status != DLM_NOTQUEUED)
|
||||||
dlm_error(status);
|
dlm_error(status);
|
||||||
dlm_revert_pending_convert(res, lock);
|
dlm_revert_pending_convert(res, lock);
|
||||||
} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
|
} else if (!lock->convert_pending) {
|
||||||
(old_owner != res->owner)) {
|
mlog(0, "%s: res %.*s, owner died and lock has been moved back "
|
||||||
mlog(0, "res %.*s is in recovering or has been recovered.\n",
|
"to granted list, retry convert.\n",
|
||||||
res->lockname.len, res->lockname.name);
|
dlm->name, res->lockname.len, res->lockname.name);
|
||||||
status = DLM_RECOVERING;
|
status = DLM_RECOVERING;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lock->convert_pending = 0;
|
||||||
bail:
|
bail:
|
||||||
spin_unlock(&res->spinlock);
|
spin_unlock(&res->spinlock);
|
||||||
|
|
||||||
|
|
|
@ -1536,7 +1536,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
|
||||||
u64 start, u64 len)
|
u64 start, u64 len)
|
||||||
{
|
{
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
u64 tmpend, end = start + len;
|
u64 tmpend = 0;
|
||||||
|
u64 end = start + len;
|
||||||
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
||||||
unsigned int csize = osb->s_clustersize;
|
unsigned int csize = osb->s_clustersize;
|
||||||
handle_t *handle;
|
handle_t *handle;
|
||||||
|
@ -1568,18 +1569,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We want to get the byte offset of the end of the 1st cluster.
|
* If start is on a cluster boundary and end is somewhere in another
|
||||||
|
* cluster, we have not COWed the cluster starting at start, unless
|
||||||
|
* end is also within the same cluster. So, in this case, we skip this
|
||||||
|
* first call to ocfs2_zero_range_for_truncate() truncate and move on
|
||||||
|
* to the next one.
|
||||||
*/
|
*/
|
||||||
tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
|
if ((start & (csize - 1)) != 0) {
|
||||||
|
/*
|
||||||
|
* We want to get the byte offset of the end of the 1st
|
||||||
|
* cluster.
|
||||||
|
*/
|
||||||
|
tmpend = (u64)osb->s_clustersize +
|
||||||
|
(start & ~(osb->s_clustersize - 1));
|
||||||
if (tmpend > end)
|
if (tmpend > end)
|
||||||
tmpend = end;
|
tmpend = end;
|
||||||
|
|
||||||
trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
|
trace_ocfs2_zero_partial_clusters_range1(
|
||||||
|
(unsigned long long)start,
|
||||||
(unsigned long long)tmpend);
|
(unsigned long long)tmpend);
|
||||||
|
|
||||||
ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
|
ret = ocfs2_zero_range_for_truncate(inode, handle, start,
|
||||||
|
tmpend);
|
||||||
if (ret)
|
if (ret)
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
|
}
|
||||||
|
|
||||||
if (tmpend < end) {
|
if (tmpend < end) {
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
|
||||||
insert_ptr);
|
insert_ptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
|
|
||||||
insert_ptr[0] = new_insert_ptr;
|
insert_ptr[0] = new_insert_ptr;
|
||||||
|
if (new_insert_ptr)
|
||||||
|
memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
|
||||||
|
|
||||||
return order;
|
return order;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1535,7 +1535,7 @@ xfs_wait_buftarg(
|
||||||
* ensure here that all reference counts have been dropped before we
|
* ensure here that all reference counts have been dropped before we
|
||||||
* start walking the LRU list.
|
* start walking the LRU list.
|
||||||
*/
|
*/
|
||||||
drain_workqueue(btp->bt_mount->m_buf_workqueue);
|
flush_workqueue(btp->bt_mount->m_buf_workqueue);
|
||||||
|
|
||||||
/* loop until there is nothing left on the lru list. */
|
/* loop until there is nothing left on the lru list. */
|
||||||
while (list_lru_count(&btp->bt_lru)) {
|
while (list_lru_count(&btp->bt_lru)) {
|
||||||
|
|
|
@ -148,6 +148,7 @@ struct fsnotify_group {
|
||||||
#define FS_PRIO_1 1 /* fanotify content based access control */
|
#define FS_PRIO_1 1 /* fanotify content based access control */
|
||||||
#define FS_PRIO_2 2 /* fanotify pre-content access */
|
#define FS_PRIO_2 2 /* fanotify pre-content access */
|
||||||
unsigned int priority;
|
unsigned int priority;
|
||||||
|
bool shutdown; /* group is being shut down, don't queue more events */
|
||||||
|
|
||||||
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
|
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
|
||||||
struct mutex mark_mutex; /* protect marks_list */
|
struct mutex mark_mutex; /* protect marks_list */
|
||||||
|
@ -179,7 +180,6 @@ struct fsnotify_group {
|
||||||
spinlock_t access_lock;
|
spinlock_t access_lock;
|
||||||
struct list_head access_list;
|
struct list_head access_list;
|
||||||
wait_queue_head_t access_waitq;
|
wait_queue_head_t access_waitq;
|
||||||
atomic_t bypass_perm;
|
|
||||||
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
|
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
|
||||||
int f_flags;
|
int f_flags;
|
||||||
unsigned int max_marks;
|
unsigned int max_marks;
|
||||||
|
@ -308,6 +308,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
|
||||||
extern void fsnotify_get_group(struct fsnotify_group *group);
|
extern void fsnotify_get_group(struct fsnotify_group *group);
|
||||||
/* drop reference on a group from fsnotify_alloc_group */
|
/* drop reference on a group from fsnotify_alloc_group */
|
||||||
extern void fsnotify_put_group(struct fsnotify_group *group);
|
extern void fsnotify_put_group(struct fsnotify_group *group);
|
||||||
|
/* group destruction begins, stop queuing new events */
|
||||||
|
extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
|
||||||
/* destroy group */
|
/* destroy group */
|
||||||
extern void fsnotify_destroy_group(struct fsnotify_group *group);
|
extern void fsnotify_destroy_group(struct fsnotify_group *group);
|
||||||
/* fasync handler function */
|
/* fasync handler function */
|
||||||
|
@ -320,8 +322,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
|
||||||
struct fsnotify_event *event,
|
struct fsnotify_event *event,
|
||||||
int (*merge)(struct list_head *,
|
int (*merge)(struct list_head *,
|
||||||
struct fsnotify_event *));
|
struct fsnotify_event *));
|
||||||
/* Remove passed event from groups notification queue */
|
|
||||||
extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
|
|
||||||
/* true if the group notification queue is empty */
|
/* true if the group notification queue is empty */
|
||||||
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
|
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
|
||||||
/* return, but do not dequeue the first event on the notification queue */
|
/* return, but do not dequeue the first event on the notification queue */
|
||||||
|
|
|
@ -202,26 +202,26 @@ extern int _cond_resched(void);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* abs - return absolute value of an argument
|
* abs - return absolute value of an argument
|
||||||
* @x: the value. If it is unsigned type, it is converted to signed type first
|
* @x: the value. If it is unsigned type, it is converted to signed type first.
|
||||||
* (s64, long or int depending on its size).
|
* char is treated as if it was signed (regardless of whether it really is)
|
||||||
|
* but the macro's return type is preserved as char.
|
||||||
*
|
*
|
||||||
* Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
|
* Return: an absolute value of x.
|
||||||
* otherwise it is signed long.
|
|
||||||
*/
|
*/
|
||||||
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
|
#define abs(x) __abs_choose_expr(x, long long, \
|
||||||
s64 __x = (x); \
|
__abs_choose_expr(x, long, \
|
||||||
(__x < 0) ? -__x : __x; \
|
__abs_choose_expr(x, int, \
|
||||||
}), ({ \
|
__abs_choose_expr(x, short, \
|
||||||
long ret; \
|
__abs_choose_expr(x, char, \
|
||||||
if (sizeof(x) == sizeof(long)) { \
|
__builtin_choose_expr( \
|
||||||
long __x = (x); \
|
__builtin_types_compatible_p(typeof(x), char), \
|
||||||
ret = (__x < 0) ? -__x : __x; \
|
(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
|
||||||
} else { \
|
((void)0)))))))
|
||||||
int __x = (x); \
|
|
||||||
ret = (__x < 0) ? -__x : __x; \
|
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
|
||||||
} \
|
__builtin_types_compatible_p(typeof(x), signed type) || \
|
||||||
ret; \
|
__builtin_types_compatible_p(typeof(x), unsigned type), \
|
||||||
}))
|
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* reciprocal_scale - "scale" a value into range [0, ep_ro)
|
* reciprocal_scale - "scale" a value into range [0, ep_ro)
|
||||||
|
|
|
@ -3036,6 +3036,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
|
||||||
napi->skb = NULL;
|
napi->skb = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool netdev_is_rx_handler_busy(struct net_device *dev);
|
||||||
int netdev_rx_handler_register(struct net_device *dev,
|
int netdev_rx_handler_register(struct net_device *dev,
|
||||||
rx_handler_func_t *rx_handler,
|
rx_handler_func_t *rx_handler,
|
||||||
void *rx_handler_data);
|
void *rx_handler_data);
|
||||||
|
|
|
@ -601,56 +601,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
|
||||||
*/
|
*/
|
||||||
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
|
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
|
||||||
{
|
{
|
||||||
int ret = 0;
|
|
||||||
char __user *end = uaddr + size - 1;
|
char __user *end = uaddr + size - 1;
|
||||||
|
|
||||||
if (unlikely(size == 0))
|
if (unlikely(size == 0))
|
||||||
return ret;
|
return 0;
|
||||||
|
|
||||||
|
if (unlikely(uaddr > end))
|
||||||
|
return -EFAULT;
|
||||||
/*
|
/*
|
||||||
* Writing zeroes into userspace here is OK, because we know that if
|
* Writing zeroes into userspace here is OK, because we know that if
|
||||||
* the zero gets there, we'll be overwriting it.
|
* the zero gets there, we'll be overwriting it.
|
||||||
*/
|
*/
|
||||||
while (uaddr <= end) {
|
do {
|
||||||
ret = __put_user(0, uaddr);
|
if (unlikely(__put_user(0, uaddr) != 0))
|
||||||
if (ret != 0)
|
return -EFAULT;
|
||||||
return ret;
|
|
||||||
uaddr += PAGE_SIZE;
|
uaddr += PAGE_SIZE;
|
||||||
}
|
} while (uaddr <= end);
|
||||||
|
|
||||||
/* Check whether the range spilled into the next page. */
|
/* Check whether the range spilled into the next page. */
|
||||||
if (((unsigned long)uaddr & PAGE_MASK) ==
|
if (((unsigned long)uaddr & PAGE_MASK) ==
|
||||||
((unsigned long)end & PAGE_MASK))
|
((unsigned long)end & PAGE_MASK))
|
||||||
ret = __put_user(0, end);
|
return __put_user(0, end);
|
||||||
|
|
||||||
return ret;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int fault_in_multipages_readable(const char __user *uaddr,
|
static inline int fault_in_multipages_readable(const char __user *uaddr,
|
||||||
int size)
|
int size)
|
||||||
{
|
{
|
||||||
volatile char c;
|
volatile char c;
|
||||||
int ret = 0;
|
|
||||||
const char __user *end = uaddr + size - 1;
|
const char __user *end = uaddr + size - 1;
|
||||||
|
|
||||||
if (unlikely(size == 0))
|
if (unlikely(size == 0))
|
||||||
return ret;
|
return 0;
|
||||||
|
|
||||||
while (uaddr <= end) {
|
if (unlikely(uaddr > end))
|
||||||
ret = __get_user(c, uaddr);
|
return -EFAULT;
|
||||||
if (ret != 0)
|
|
||||||
return ret;
|
do {
|
||||||
|
if (unlikely(__get_user(c, uaddr) != 0))
|
||||||
|
return -EFAULT;
|
||||||
uaddr += PAGE_SIZE;
|
uaddr += PAGE_SIZE;
|
||||||
}
|
} while (uaddr <= end);
|
||||||
|
|
||||||
/* Check whether the range spilled into the next page. */
|
/* Check whether the range spilled into the next page. */
|
||||||
if (((unsigned long)uaddr & PAGE_MASK) ==
|
if (((unsigned long)uaddr & PAGE_MASK) ==
|
||||||
((unsigned long)end & PAGE_MASK)) {
|
((unsigned long)end & PAGE_MASK)) {
|
||||||
ret = __get_user(c, end);
|
return __get_user(c, end);
|
||||||
(void)c;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
|
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
|
||||||
|
|
|
@ -1,6 +1,16 @@
|
||||||
#ifndef __SMC91X_H__
|
#ifndef __SMC91X_H__
|
||||||
#define __SMC91X_H__
|
#define __SMC91X_H__
|
||||||
|
|
||||||
|
/*
|
||||||
|
* These bits define which access sizes a platform can support, rather
|
||||||
|
* than the maximal access size. So, if your platform can do 16-bit
|
||||||
|
* and 32-bit accesses to the SMC91x device, but not 8-bit, set both
|
||||||
|
* SMC91X_USE_16BIT and SMC91X_USE_32BIT.
|
||||||
|
*
|
||||||
|
* The SMC91x driver requires at least one of SMC91X_USE_8BIT or
|
||||||
|
* SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
|
||||||
|
* an invalid configuration.
|
||||||
|
*/
|
||||||
#define SMC91X_USE_8BIT (1 << 0)
|
#define SMC91X_USE_8BIT (1 << 0)
|
||||||
#define SMC91X_USE_16BIT (1 << 1)
|
#define SMC91X_USE_16BIT (1 << 1)
|
||||||
#define SMC91X_USE_32BIT (1 << 2)
|
#define SMC91X_USE_32BIT (1 << 2)
|
||||||
|
|
|
@ -52,7 +52,7 @@ struct unix_sock {
|
||||||
struct sock sk;
|
struct sock sk;
|
||||||
struct unix_address *addr;
|
struct unix_address *addr;
|
||||||
struct path path;
|
struct path path;
|
||||||
struct mutex readlock;
|
struct mutex iolock, bindlock;
|
||||||
struct sock *peer;
|
struct sock *peer;
|
||||||
struct list_head link;
|
struct list_head link;
|
||||||
atomic_long_t inflight;
|
atomic_long_t inflight;
|
||||||
|
|
|
@ -2106,7 +2106,7 @@ static int cpuset_allow_attach(struct cgroup_taskset *tset)
|
||||||
* which could have been changed by cpuset just after it inherits the
|
* which could have been changed by cpuset just after it inherits the
|
||||||
* state from the parent and before it sits on the cgroup's task list.
|
* state from the parent and before it sits on the cgroup's task list.
|
||||||
*/
|
*/
|
||||||
void cpuset_fork(struct task_struct *task)
|
void cpuset_fork(struct task_struct *task, void *priv)
|
||||||
{
|
{
|
||||||
if (task_css_is_root(task, cpuset_cgrp_id))
|
if (task_css_is_root(task, cpuset_cgrp_id))
|
||||||
return;
|
return;
|
||||||
|
|
|
@ -299,12 +299,12 @@ static int create_image(int platform_mode)
|
||||||
save_processor_state();
|
save_processor_state();
|
||||||
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
|
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
|
||||||
error = swsusp_arch_suspend();
|
error = swsusp_arch_suspend();
|
||||||
|
/* Restore control flow magically appears here */
|
||||||
|
restore_processor_state();
|
||||||
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
|
trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
|
||||||
if (error)
|
if (error)
|
||||||
printk(KERN_ERR "PM: Error %d creating hibernation image\n",
|
printk(KERN_ERR "PM: Error %d creating hibernation image\n",
|
||||||
error);
|
error);
|
||||||
/* Restore control flow magically appears here */
|
|
||||||
restore_processor_state();
|
|
||||||
if (!in_suspend)
|
if (!in_suspend)
|
||||||
events_check_enabled = false;
|
events_check_enabled = false;
|
||||||
|
|
||||||
|
|
|
@ -765,9 +765,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
|
||||||
*/
|
*/
|
||||||
static bool rtree_next_node(struct memory_bitmap *bm)
|
static bool rtree_next_node(struct memory_bitmap *bm)
|
||||||
{
|
{
|
||||||
|
if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
|
||||||
bm->cur.node = list_entry(bm->cur.node->list.next,
|
bm->cur.node = list_entry(bm->cur.node->list.next,
|
||||||
struct rtree_node, list);
|
struct rtree_node, list);
|
||||||
if (&bm->cur.node->list != &bm->cur.zone->leaves) {
|
|
||||||
bm->cur.node_pfn += BM_BITS_PER_BLOCK;
|
bm->cur.node_pfn += BM_BITS_PER_BLOCK;
|
||||||
bm->cur.node_bit = 0;
|
bm->cur.node_bit = 0;
|
||||||
touch_softlockup_watchdog();
|
touch_softlockup_watchdog();
|
||||||
|
@ -775,9 +775,9 @@ static bool rtree_next_node(struct memory_bitmap *bm)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* No more nodes, goto next zone */
|
/* No more nodes, goto next zone */
|
||||||
|
if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
|
||||||
bm->cur.zone = list_entry(bm->cur.zone->list.next,
|
bm->cur.zone = list_entry(bm->cur.zone->list.next,
|
||||||
struct mem_zone_bm_rtree, list);
|
struct mem_zone_bm_rtree, list);
|
||||||
if (&bm->cur.zone->list != &bm->zones) {
|
|
||||||
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
|
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
|
||||||
struct rtree_node, list);
|
struct rtree_node, list);
|
||||||
bm->cur.node_pfn = 0;
|
bm->cur.node_pfn = 0;
|
||||||
|
|
|
@ -1,4 +1,8 @@
|
||||||
|
|
||||||
|
# We are fully aware of the dangers of __builtin_return_address()
|
||||||
|
FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
|
||||||
|
KBUILD_CFLAGS += $(FRAME_CFLAGS)
|
||||||
|
|
||||||
# Do not instrument the tracer itself:
|
# Do not instrument the tracer itself:
|
||||||
|
|
||||||
ifdef CONFIG_FUNCTION_TRACER
|
ifdef CONFIG_FUNCTION_TRACER
|
||||||
|
|
|
@ -4816,19 +4816,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
|
||||||
struct trace_iterator *iter = filp->private_data;
|
struct trace_iterator *iter = filp->private_data;
|
||||||
ssize_t sret;
|
ssize_t sret;
|
||||||
|
|
||||||
/* return any leftover data */
|
|
||||||
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
|
|
||||||
if (sret != -EBUSY)
|
|
||||||
return sret;
|
|
||||||
|
|
||||||
trace_seq_init(&iter->seq);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Avoid more than one consumer on a single file descriptor
|
* Avoid more than one consumer on a single file descriptor
|
||||||
* This is just a matter of traces coherency, the ring buffer itself
|
* This is just a matter of traces coherency, the ring buffer itself
|
||||||
* is protected.
|
* is protected.
|
||||||
*/
|
*/
|
||||||
mutex_lock(&iter->mutex);
|
mutex_lock(&iter->mutex);
|
||||||
|
|
||||||
|
/* return any leftover data */
|
||||||
|
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
|
||||||
|
if (sret != -EBUSY)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
trace_seq_init(&iter->seq);
|
||||||
|
|
||||||
if (iter->trace->read) {
|
if (iter->trace->read) {
|
||||||
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
|
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
|
||||||
if (sret)
|
if (sret)
|
||||||
|
@ -5855,9 +5856,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (splice_grow_spd(pipe, &spd))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (*ppos & (PAGE_SIZE - 1))
|
if (*ppos & (PAGE_SIZE - 1))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
@ -5867,6 +5865,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||||
len &= PAGE_MASK;
|
len &= PAGE_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (splice_grow_spd(pipe, &spd))
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
again:
|
again:
|
||||||
trace_access_lock(iter->cpu_file);
|
trace_access_lock(iter->cpu_file);
|
||||||
entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
|
entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
|
||||||
|
@ -5924,19 +5925,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
|
||||||
/* did we read anything? */
|
/* did we read anything? */
|
||||||
if (!spd.nr_pages) {
|
if (!spd.nr_pages) {
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
goto out;
|
||||||
|
|
||||||
|
ret = -EAGAIN;
|
||||||
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
|
if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
|
||||||
return -EAGAIN;
|
goto out;
|
||||||
|
|
||||||
ret = wait_on_pipe(iter, true);
|
ret = wait_on_pipe(iter, true);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
goto out;
|
||||||
|
|
||||||
goto again;
|
goto again;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = splice_to_pipe(pipe, &spd);
|
ret = splice_to_pipe(pipe, &spd);
|
||||||
|
out:
|
||||||
splice_shrink_spd(&spd);
|
splice_shrink_spd(&spd);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
|
19
mm/vmscan.c
19
mm/vmscan.c
|
@ -2159,23 +2159,6 @@ out:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
|
|
||||||
static void init_tlb_ubc(void)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* This deliberately does not clear the cpumask as it's expensive
|
|
||||||
* and unnecessary. If there happens to be data in there then the
|
|
||||||
* first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
|
|
||||||
* then will be cleared.
|
|
||||||
*/
|
|
||||||
current->tlb_ubc.flush_required = false;
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
static inline void init_tlb_ubc(void)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
|
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
|
||||||
*/
|
*/
|
||||||
|
@ -2210,8 +2193,6 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
|
||||||
scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
|
scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
|
||||||
sc->priority == DEF_PRIORITY);
|
sc->priority == DEF_PRIORITY);
|
||||||
|
|
||||||
init_tlb_ubc();
|
|
||||||
|
|
||||||
blk_start_plug(&plug);
|
blk_start_plug(&plug);
|
||||||
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
||||||
nr[LRU_INACTIVE_FILE]) {
|
nr[LRU_INACTIVE_FILE]) {
|
||||||
|
|
|
@ -1113,7 +1113,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
|
||||||
} else {
|
} else {
|
||||||
err = br_ip6_multicast_add_group(br, port,
|
err = br_ip6_multicast_add_group(br, port,
|
||||||
&grec->grec_mca, vid);
|
&grec->grec_mca, vid);
|
||||||
if (!err)
|
if (err)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -3721,6 +3721,22 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
|
||||||
return skb;
|
return skb;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* netdev_is_rx_handler_busy - check if receive handler is registered
|
||||||
|
* @dev: device to check
|
||||||
|
*
|
||||||
|
* Check if a receive handler is already registered for a given device.
|
||||||
|
* Return true if there one.
|
||||||
|
*
|
||||||
|
* The caller must hold the rtnl_mutex.
|
||||||
|
*/
|
||||||
|
bool netdev_is_rx_handler_busy(struct net_device *dev)
|
||||||
|
{
|
||||||
|
ASSERT_RTNL();
|
||||||
|
return dev && rtnl_dereference(dev->rx_handler);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* netdev_rx_handler_register - register receive handler
|
* netdev_rx_handler_register - register receive handler
|
||||||
* @dev: device to register a handler for
|
* @dev: device to register a handler for
|
||||||
|
|
|
@ -2453,9 +2453,7 @@ struct fib_route_iter {
|
||||||
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
|
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
|
||||||
loff_t pos)
|
loff_t pos)
|
||||||
{
|
{
|
||||||
struct fib_table *tb = iter->main_tb;
|
|
||||||
struct key_vector *l, **tp = &iter->tnode;
|
struct key_vector *l, **tp = &iter->tnode;
|
||||||
struct trie *t;
|
|
||||||
t_key key;
|
t_key key;
|
||||||
|
|
||||||
/* use cache location of next-to-find key */
|
/* use cache location of next-to-find key */
|
||||||
|
@ -2463,8 +2461,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
|
||||||
pos -= iter->pos;
|
pos -= iter->pos;
|
||||||
key = iter->key;
|
key = iter->key;
|
||||||
} else {
|
} else {
|
||||||
t = (struct trie *)tb->tb_data;
|
|
||||||
iter->tnode = t->kv;
|
|
||||||
iter->pos = 0;
|
iter->pos = 0;
|
||||||
key = 0;
|
key = 0;
|
||||||
}
|
}
|
||||||
|
@ -2505,12 +2501,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
iter->main_tb = tb;
|
iter->main_tb = tb;
|
||||||
|
t = (struct trie *)tb->tb_data;
|
||||||
|
iter->tnode = t->kv;
|
||||||
|
|
||||||
if (*pos != 0)
|
if (*pos != 0)
|
||||||
return fib_route_get_idx(iter, *pos);
|
return fib_route_get_idx(iter, *pos);
|
||||||
|
|
||||||
t = (struct trie *)tb->tb_data;
|
|
||||||
iter->tnode = t->kv;
|
|
||||||
iter->pos = 0;
|
iter->pos = 0;
|
||||||
iter->key = 0;
|
iter->key = 0;
|
||||||
|
|
||||||
|
|
|
@ -540,6 +540,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
|
||||||
.get_link_net = ip_tunnel_get_link_net,
|
.get_link_net = ip_tunnel_get_link_net,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static bool is_vti_tunnel(const struct net_device *dev)
|
||||||
|
{
|
||||||
|
return dev->netdev_ops == &vti_netdev_ops;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int vti_device_event(struct notifier_block *unused,
|
||||||
|
unsigned long event, void *ptr)
|
||||||
|
{
|
||||||
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
||||||
|
struct ip_tunnel *tunnel = netdev_priv(dev);
|
||||||
|
|
||||||
|
if (!is_vti_tunnel(dev))
|
||||||
|
return NOTIFY_DONE;
|
||||||
|
|
||||||
|
switch (event) {
|
||||||
|
case NETDEV_DOWN:
|
||||||
|
if (!net_eq(tunnel->net, dev_net(dev)))
|
||||||
|
xfrm_garbage_collect(tunnel->net);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return NOTIFY_DONE;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct notifier_block vti_notifier_block __read_mostly = {
|
||||||
|
.notifier_call = vti_device_event,
|
||||||
|
};
|
||||||
|
|
||||||
static int __init vti_init(void)
|
static int __init vti_init(void)
|
||||||
{
|
{
|
||||||
const char *msg;
|
const char *msg;
|
||||||
|
@ -547,6 +574,8 @@ static int __init vti_init(void)
|
||||||
|
|
||||||
pr_info("IPv4 over IPsec tunneling driver\n");
|
pr_info("IPv4 over IPsec tunneling driver\n");
|
||||||
|
|
||||||
|
register_netdevice_notifier(&vti_notifier_block);
|
||||||
|
|
||||||
msg = "tunnel device";
|
msg = "tunnel device";
|
||||||
err = register_pernet_device(&vti_net_ops);
|
err = register_pernet_device(&vti_net_ops);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
|
@ -579,6 +608,7 @@ xfrm_proto_ah_failed:
|
||||||
xfrm_proto_esp_failed:
|
xfrm_proto_esp_failed:
|
||||||
unregister_pernet_device(&vti_net_ops);
|
unregister_pernet_device(&vti_net_ops);
|
||||||
pernet_dev_failed:
|
pernet_dev_failed:
|
||||||
|
unregister_netdevice_notifier(&vti_notifier_block);
|
||||||
pr_err("vti init: failed to register %s\n", msg);
|
pr_err("vti init: failed to register %s\n", msg);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -590,6 +620,7 @@ static void __exit vti_fini(void)
|
||||||
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
|
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
|
||||||
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
|
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
|
||||||
unregister_pernet_device(&vti_net_ops);
|
unregister_pernet_device(&vti_net_ops);
|
||||||
|
unregister_netdevice_notifier(&vti_notifier_block);
|
||||||
}
|
}
|
||||||
|
|
||||||
module_init(vti_init);
|
module_init(vti_init);
|
||||||
|
|
|
@ -808,8 +808,14 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
|
||||||
u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
|
u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
|
||||||
tcp_sk(sk)->snd_nxt;
|
tcp_sk(sk)->snd_nxt;
|
||||||
|
|
||||||
|
/* RFC 7323 2.3
|
||||||
|
* The window field (SEG.WND) of every outgoing segment, with the
|
||||||
|
* exception of <SYN> segments, MUST be right-shifted by
|
||||||
|
* Rcv.Wind.Shift bits:
|
||||||
|
*/
|
||||||
tcp_v4_send_ack(sock_net(sk), skb, seq,
|
tcp_v4_send_ack(sock_net(sk), skb, seq,
|
||||||
tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
|
tcp_rsk(req)->rcv_nxt,
|
||||||
|
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
|
||||||
tcp_time_stamp,
|
tcp_time_stamp,
|
||||||
req->ts_recent,
|
req->ts_recent,
|
||||||
0,
|
0,
|
||||||
|
|
|
@ -75,7 +75,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
|
||||||
if (!tcp_is_cwnd_limited(sk))
|
if (!tcp_is_cwnd_limited(sk))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (tp->snd_cwnd <= tp->snd_ssthresh)
|
if (tcp_in_slow_start(tp))
|
||||||
tcp_slow_start(tp, acked);
|
tcp_slow_start(tp, acked);
|
||||||
|
|
||||||
else if (!yeah->doing_reno_now) {
|
else if (!yeah->doing_reno_now) {
|
||||||
|
|
|
@ -152,8 +152,10 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||||
rt = (struct rt6_info *) dst;
|
rt = (struct rt6_info *) dst;
|
||||||
|
|
||||||
np = inet6_sk(sk);
|
np = inet6_sk(sk);
|
||||||
if (!np)
|
if (!np) {
|
||||||
return -EBADF;
|
err = -EBADF;
|
||||||
|
goto dst_err_out;
|
||||||
|
}
|
||||||
|
|
||||||
pfh.icmph.type = user_icmph.icmp6_type;
|
pfh.icmph.type = user_icmph.icmp6_type;
|
||||||
pfh.icmph.code = user_icmph.icmp6_code;
|
pfh.icmph.code = user_icmph.icmp6_code;
|
||||||
|
@ -183,6 +185,9 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||||
}
|
}
|
||||||
release_sock(sk);
|
release_sock(sk);
|
||||||
|
|
||||||
|
dst_err_out:
|
||||||
|
dst_release(dst);
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
|
|
@ -933,9 +933,15 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
|
||||||
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
|
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
|
||||||
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
|
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
|
||||||
*/
|
*/
|
||||||
|
/* RFC 7323 2.3
|
||||||
|
* The window field (SEG.WND) of every outgoing segment, with the
|
||||||
|
* exception of <SYN> segments, MUST be right-shifted by
|
||||||
|
* Rcv.Wind.Shift bits:
|
||||||
|
*/
|
||||||
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
|
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
|
||||||
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
|
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
|
||||||
tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
|
tcp_rsk(req)->rcv_nxt,
|
||||||
|
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
|
||||||
tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
|
tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
|
||||||
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
|
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
|
||||||
0, 0);
|
0, 0);
|
||||||
|
|
|
@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
|
||||||
|
|
||||||
self->magic = IAS_MAGIC;
|
self->magic = IAS_MAGIC;
|
||||||
self->mode = mode;
|
self->mode = mode;
|
||||||
if (mode == IAS_CLIENT)
|
if (mode == IAS_CLIENT) {
|
||||||
iriap_register_lsap(self, slsap_sel, mode);
|
if (iriap_register_lsap(self, slsap_sel, mode)) {
|
||||||
|
kfree(self);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
self->confirm = callback;
|
self->confirm = callback;
|
||||||
self->priv = priv;
|
self->priv = priv;
|
||||||
|
|
|
@ -2111,6 +2111,7 @@ restart:
|
||||||
TIPC_CONN_MSG, SHORT_H_SIZE,
|
TIPC_CONN_MSG, SHORT_H_SIZE,
|
||||||
0, dnode, onode, dport, oport,
|
0, dnode, onode, dport, oport,
|
||||||
TIPC_CONN_SHUTDOWN);
|
TIPC_CONN_SHUTDOWN);
|
||||||
|
if (skb)
|
||||||
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
|
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
|
||||||
}
|
}
|
||||||
tsk->connected = 0;
|
tsk->connected = 0;
|
||||||
|
|
|
@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
|
||||||
{
|
{
|
||||||
struct unix_sock *u = unix_sk(sk);
|
struct unix_sock *u = unix_sk(sk);
|
||||||
|
|
||||||
if (mutex_lock_interruptible(&u->readlock))
|
if (mutex_lock_interruptible(&u->iolock))
|
||||||
return -EINTR;
|
return -EINTR;
|
||||||
|
|
||||||
sk->sk_peek_off = val;
|
sk->sk_peek_off = val;
|
||||||
mutex_unlock(&u->readlock);
|
mutex_unlock(&u->iolock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -778,7 +778,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	spin_lock_init(&u->lock);
 	atomic_long_set(&u->inflight, 0);
 	INIT_LIST_HEAD(&u->link);
-	mutex_init(&u->readlock); /* single task reading lock */
+	mutex_init(&u->iolock); /* single task reading lock */
+	mutex_init(&u->bindlock); /* single task binding lock */
 	init_waitqueue_head(&u->peer_wait);
 	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 	unix_insert_socket(unix_sockets_unbound(sk), sk);
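The reason for two mutexes: unix_bind() must perform filesystem operations (kern_path_create() and friends) while holding the socket's mutex, while the splice path ends up taking the same socket mutex after pipe/filesystem locks are already held, so one shared mutex can be acquired in opposite orders on the two paths. Giving binding its own lock removes the inversion. A hedged pthread sketch of the idea, with a single fs_lock standing in for the filesystem/pipe locking (all names here are illustrative, not the kernel's actual locking):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;	/* read/write/splice */
static pthread_mutex_t bindlock = PTHREAD_MUTEX_INITIALIZER;	/* bind/autobind */
static pthread_mutex_t fs_lock = PTHREAD_MUTEX_INITIALIZER;	/* fs/pipe stand-in */

static void *do_bind(void *arg)
{
	pthread_mutex_lock(&bindlock);	/* before the fix: the shared readlock */
	pthread_mutex_lock(&fs_lock);	/* create the socket inode */
	puts("bind done");
	pthread_mutex_unlock(&fs_lock);
	pthread_mutex_unlock(&bindlock);
	return arg;
}

static void *do_splice(void *arg)
{
	pthread_mutex_lock(&fs_lock);	/* pipe side is locked first */
	pthread_mutex_lock(&iolock);	/* before the fix: the same readlock -> ABBA */
	puts("splice done");
	pthread_mutex_unlock(&iolock);
	pthread_mutex_unlock(&fs_lock);
	return arg;
}

int main(void)
{
	pthread_t a, b;

	/* With iolock != bindlock there is no cycle: do_bind orders
	 * bindlock -> fs_lock, do_splice orders fs_lock -> iolock. */
	pthread_create(&a, NULL, do_bind, NULL);
	pthread_create(&b, NULL, do_splice, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}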
@ -847,7 +848,7 @@ static int unix_autobind(struct socket *sock)
 	int err;
 	unsigned int retries = 0;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
 		return err;
 
@ -894,7 +895,7 @@ retry:
 	spin_unlock(&unix_table_lock);
 	err = 0;
 
-out:	mutex_unlock(&u->readlock);
+out:	mutex_unlock(&u->bindlock);
 	return err;
 }
 
@ -953,20 +954,32 @@ fail:
 	return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
-		      struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-	int err;
+	struct dentry *dentry;
+	struct path path;
+	int err = 0;
+
+	/*
+	 * Get the parent directory, calculate the hash for last
+	 * component.
+	 */
+	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+	err = PTR_ERR(dentry);
+	if (IS_ERR(dentry))
+		return err;
 
-	err = security_path_mknod(path, dentry, mode, 0);
+	/*
+	 * All right, let's create it.
+	 */
+	err = security_path_mknod(&path, dentry, mode, 0);
 	if (!err) {
-		err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
 		if (!err) {
-			res->mnt = mntget(path->mnt);
+			res->mnt = mntget(path.mnt);
 			res->dentry = dget(dentry);
 		}
 	}
+	done_path_create(&path, dentry);
 	return err;
 }
@ -977,12 +990,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct unix_sock *u = unix_sk(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
-	int err, name_err;
+	int err;
 	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
-	struct path path;
-	struct dentry *dentry;
 
 	err = -EINVAL;
 	if (sunaddr->sun_family != AF_UNIX)
@ -998,34 +1009,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
-	name_err = 0;
-	dentry = NULL;
-	if (sun_path[0]) {
-		/* Get the parent directory, calculate the hash for last
-		 * component.
-		 */
-		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
-		if (IS_ERR(dentry)) {
-			/* delay report until after 'already bound' check */
-			name_err = PTR_ERR(dentry);
-			dentry = NULL;
-		}
-	}
-
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
-		goto out_path;
+		goto out;
 
 	err = -EINVAL;
 	if (u->addr)
 		goto out_up;
 
-	if (name_err) {
-		err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-		goto out_up;
-	}
-
 	err = -ENOMEM;
 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
 	if (!addr)
@ -1036,11 +1027,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	addr->hash = hash ^ sk->sk_type;
 	atomic_set(&addr->refcnt, 1);
 
-	if (dentry) {
-		struct path u_path;
+	if (sun_path[0]) {
+		struct path path;
 		umode_t mode = S_IFSOCK |
 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = unix_mknod(dentry, &path, mode, &u_path);
+		err = unix_mknod(sun_path, mode, &path);
 		if (err) {
 			if (err == -EEXIST)
 				err = -EADDRINUSE;
@ -1048,9 +1039,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			goto out_up;
 		}
 		addr->hash = UNIX_HASH_SIZE;
-		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+		hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
-		u->path = u_path;
+		u->path = path;
 		list = &unix_socket_table[hash];
 	} else {
 		spin_lock(&unix_table_lock);
@ -1072,11 +1063,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
-	mutex_unlock(&u->readlock);
-out_path:
-	if (dentry)
-		done_path_create(&path, dentry);
-
+	mutex_unlock(&u->bindlock);
 out:
 	return err;
 }
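A user-visible contract the reworked unix_bind() keeps: bind() itself creates the socket inode, and binding to a path that already exists fails with EADDRINUSE (the filesystem's -EEXIST is translated, as the error mapping a few hunks up shows). A short demonstration; the /tmp/demo.sock path is arbitrary:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sa = { .sun_family = AF_UNIX };
	int a, b;

	strncpy(sa.sun_path, "/tmp/demo.sock", sizeof(sa.sun_path) - 1);
	unlink(sa.sun_path);	/* start from a clean slate */

	a = socket(AF_UNIX, SOCK_STREAM, 0);
	b = socket(AF_UNIX, SOCK_STREAM, 0);

	/* First bind creates the socket inode on disk. */
	if (bind(a, (struct sockaddr *)&sa, sizeof(sa)) != 0)
		perror("first bind");

	/* Second bind hits the existing inode: -EEXIST in the kernel,
	 * surfaced to userspace as EADDRINUSE. */
	if (bind(b, (struct sockaddr *)&sa, sizeof(sa)) != 0 &&
	    errno == EADDRINUSE)
		puts("second bind: EADDRINUSE, as expected");

	close(a);
	close(b);
	unlink(sa.sun_path);
	return 0;
}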
@ -1971,17 +1958,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
 	if (false) {
 alloc_skb:
 		unix_state_unlock(other);
-		mutex_unlock(&unix_sk(other)->readlock);
+		mutex_unlock(&unix_sk(other)->iolock);
 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
 					      &err, 0);
 		if (!newskb)
 			goto err;
 	}
 
-	/* we must acquire readlock as we modify already present
+	/* we must acquire iolock as we modify already present
 	 * skbs in the sk_receive_queue and mess with skb->len
 	 */
-	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
 	if (err) {
 		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
 		goto err;
@ -2048,7 +2035,7 @@ alloc_skb:
 	}
 
 	unix_state_unlock(other);
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 
 	other->sk_data_ready(other);
 	scm_destroy(&scm);
@ -2057,7 +2044,7 @@ alloc_skb:
 err_state_unlock:
 	unix_state_unlock(other);
 err_unlock:
-	mutex_unlock(&unix_sk(other)->readlock);
+	mutex_unlock(&unix_sk(other)->iolock);
 err:
 	kfree_skb(newskb);
 	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@ -2122,7 +2109,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 	if (flags&MSG_OOB)
 		goto out;
 
-	err = mutex_lock_interruptible(&u->readlock);
+	err = mutex_lock_interruptible(&u->iolock);
 	if (unlikely(err)) {
 		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
 		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
@ -2198,7 +2185,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 out_free:
 	skb_free_datagram(sk, skb);
 out_unlock:
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 out:
 	return err;
 }
@ -2293,7 +2280,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
 	/* Lock the socket to prevent queue disordering
 	 * while sleeps in memcpy_tomsg
 	 */
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	if (flags & MSG_PEEK)
 		skip = sk_peek_offset(sk, flags);
@ -2334,7 +2321,7 @@ again:
 			break;
 		}
 
-		mutex_unlock(&u->readlock);
+		mutex_unlock(&u->iolock);
 
 		timeo = unix_stream_data_wait(sk, timeo, last,
 					      last_len);
@ -2345,7 +2332,7 @@ again:
 			goto out;
 		}
 
-		mutex_lock(&u->readlock);
+		mutex_lock(&u->iolock);
 		continue;
unlock:
 		unix_state_unlock(sk);
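These two hunks show the recv loop's lock discipline: the I/O mutex is dropped before sleeping in unix_stream_data_wait() and retaken before the queue is touched again, so a reader blocked waiting for data never holds the lock. A hedged pthread sketch of that unlock/sleep/relock shape, where a condition variable plays the role of unix_stream_data_wait() (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t data_ready = PTHREAD_COND_INITIALIZER;
static int available;	/* queued bytes, protected by iolock */

static void *reader(void *arg)
{
	pthread_mutex_lock(&iolock);
	while (available == 0) {
		/* Drops iolock while blocked and reacquires it before
		 * returning -- the unlock/wait/lock dance the diff
		 * spells out by hand around unix_stream_data_wait(). */
		pthread_cond_wait(&data_ready, &iolock);
	}
	printf("consuming %d bytes\n", available);
	available = 0;
	pthread_mutex_unlock(&iolock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);

	pthread_mutex_lock(&iolock);	/* producer queues data */
	available = 42;
	pthread_cond_signal(&data_ready);
	pthread_mutex_unlock(&iolock);

	pthread_join(t, NULL);
	return 0;
}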
@ -2448,7 +2435,7 @@ unlock:
 		}
 	} while (size);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	if (state->msg)
 		scm_recv(sock, state->msg, &scm, flags);
 	else
@ -2489,9 +2476,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
 	int ret;
 	struct unix_sock *u = unix_sk(sk);
 
-	mutex_unlock(&u->readlock);
+	mutex_unlock(&u->iolock);
 	ret = splice_to_pipe(pipe, spd);
-	mutex_lock(&u->readlock);
+	mutex_lock(&u->iolock);
 
 	return ret;
 }
@ -6628,7 +6628,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 
 		params.n_counter_offsets_presp = len / sizeof(u16);
 		if (rdev->wiphy.max_num_csa_counters &&
-		    (params.n_counter_offsets_beacon >
+		    (params.n_counter_offsets_presp >
 		     rdev->wiphy.max_num_csa_counters))
			return -EINVAL;
 