This is the 4.4.45 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAliJpBoACgkQONu9yGCS
 aT54KRAAm2BjHOgU3FlM/mTal6ZVNIPKS/Xy9W0YXdQ+9URDKWNb0fwuqWAsf7LP
 n6ozLIB2n8FNlMWro7VHVNXKiUtw3BSRcjNamMm61XQcR1g0xY4iW6uhtpoTblAG
 PdeK3WAUfROxJEAxciFSTqfPKgSDQeaQRDSG10KTP5qIAPQM0T0/VU+20K0w7Cbf
 UZEJaGDOZS0XIRvNOak2DvQQxeXzwfvY5JTdx/MBOHw6e1MPfndeuhRFDJrIeOZC
 hKaG1ipkMQANcftHWTmJQ0gZEZMgVokqDtyQO3hqyrqLgVChM24j6mD7KvguCfPQ
 +ixC5oDQzBMQnp2uienP6FbDg1BZjHxO2R8z0vscXk++QtB3Mjxk8LBKZqeA636k
 E1fuGCrRf6Ec/0d7loMqOOO4KCUxOu+0JuhmlvmQDtrtGvQa5Qqd5WEF8ecOm6Y+
 5yKI11P5yiFANEkz4ysfTlyEltvIxp4Psu0YBrnVM6x5vNYEnr9wuGdikL21FI6F
 kS2FRB9+u2H4n2qNz7PGMt0tPub/F34W7RvD/zII4wqRrFz3wtw3UufAGgiT6X2n
 EIye5DErGfDcpHJ13kKYd7kCXl1u1y8tsBISRqYxl1sqshIZis0ktsb3ZtE5NMXF
 Qbh72lvpUU78E452ER1XDmk6keb98zUWbOtlBfbqJZ4iVpQ4GGY=
 =lShl
 -----END PGP SIGNATURE-----

Merge tag 'v4.4.45' into android-4.4.y

This is the 4.4.45 stable release
Dmitry Shmidt committed 2017-01-26 13:42:20 -08:00
commit e9a82a4cbe
83 changed files with 786 additions and 284 deletions


@ -77,7 +77,7 @@ Examples:
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <0 31 0x04 0 53 0x04>;
interrupts = <31>, <53>;
#clock-cells = <1>;
};


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 43
SUBLEVEL = 45
EXTRAVERSION =
NAME = Blurry Fish Butt


@ -85,6 +85,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";


@ -30,11 +30,11 @@
};
};
avic: avic-interrupt-controller@60000000 {
avic: interrupt-controller@68000000 {
compatible = "fsl,imx31-avic", "fsl,avic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x60000000 0x100000>;
reg = <0x68000000 0x100000>;
};
soc {
@ -110,13 +110,6 @@
interrupts = <19>;
clocks = <&clks 25>;
};
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <0 31 0x04 0 53 0x04>;
#clock-cells = <1>;
};
};
aips@53f00000 { /* AIPS2 */
@ -126,6 +119,13 @@
reg = <0x53f00000 0x100000>;
ranges;
clks: ccm@53f80000{
compatible = "fsl,imx31-ccm";
reg = <0x53f80000 0x4000>;
interrupts = <31>, <53>;
#clock-cells = <1>;
};
gpt: timer@53f90000 {
compatible = "fsl,imx31-gpt";
reg = <0x53f90000 0x4000>;


@ -319,8 +319,6 @@
compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_max-sgtl5000";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@ -401,6 +399,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks 201>;
VDDA-supply = <&reg_2p5v>;


@ -81,6 +81,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION 0x510002d0
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15


@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
/*
* Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
* whenever a WFI is issued, even if the core is not powered down, in
* violation of the architecture. When DBGPRSR.SPD is set, accesses to
* breakpoint and watchpoint registers are treated as undefined, so
* this results in boot time and runtime failures when these are
* accessed and we unexpectedly take a trap.
*
* It's not clear if/how this can be worked around, so we blacklist
* Scorpion CPUs to avoid these issues.
*/
if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
return 0;
}
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
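The comment above explains the rationale for the blacklist; for reference, a small userspace sketch of the part-number test it relies on (the MIDR value below is hypothetical, while 0xff00fff0 is the mask read_cpuid_part() applies on 32-bit ARM):

    #include <stdio.h>

    #define ARM_CPU_PART_SCORPION 0x510002d0

    int main(void)
    {
        unsigned int midr = 0x510f02d2;         /* hypothetical Scorpion MIDR */
        unsigned int part = midr & 0xff00fff0;  /* read_cpuid_part() masking */
        printf("scorpion? %d\n", part == ARM_CPU_PART_SCORPION);  /* 1 */
        return 0;
    }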


@ -9,6 +9,7 @@
*/
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)


@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
*/
bool prcmu_is_cpu_in_wfi(int cpu)
{
return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
PRCM_ARM_WFI_STANDBY_WFI0;
return readl(PRCM_ARM_WFI_STANDBY) &
(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
}
/*
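The one-line change above fixes a C precedence pitfall: '&' binds tighter than '?:', so the old expression masked the register with the raw cpu number and then returned a constant, always-nonzero flag. A minimal sketch with illustrative mask values (not the real PRCM definitions):

    #include <stdio.h>

    #define WFI0 0x1u   /* stand-ins for PRCM_ARM_WFI_STANDBY_WFI0/WFI1 */
    #define WFI1 0x2u

    int main(void)
    {
        unsigned int reg = WFI0;   /* pretend only CPU0 is in WFI */
        int cpu = 1;

        /* Old form parses as (reg & cpu) ? WFI1 : WFI0: it tests the wrong
         * bit and then returns a constant mask, which is always nonzero. */
        unsigned int old = reg & cpu ? WFI1 : WFI0;

        /* Fixed form: pick the per-CPU mask first, then test the register. */
        unsigned int fixed = reg & (cpu ? WFI1 : WFI0);

        printf("old=%#x fixed=%#x\n", old, fixed);   /* old=0x2, fixed=0 */
        return 0;
    }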


@ -77,6 +77,7 @@ struct user_fpsimd_state {
__uint128_t vregs[32];
__u32 fpsr;
__u32 fpcr;
__u32 __reserved[2];
};
struct user_hwdebug_state {


@ -678,7 +678,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
bl bad_mode
bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)


@ -450,6 +450,8 @@ static int hw_break_set(struct task_struct *target,
/* (address, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
if (count < PTRACE_HBP_ADDR_SZ)
return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
@ -459,6 +461,8 @@ static int hw_break_set(struct task_struct *target,
return ret;
offset += PTRACE_HBP_ADDR_SZ;
if (!count)
break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
@ -495,7 +499,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct user_pt_regs newregs;
struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
if (ret)
@ -525,7 +529,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct user_fpsimd_state newstate;
struct user_fpsimd_state newstate =
target->thread.fpsimd_state.user_fpsimd;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
@ -549,7 +554,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long tls;
unsigned long tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
@ -575,7 +580,8 @@ static int system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int syscallno, ret;
int syscallno = task_pt_regs(target)->syscallno;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
if (ret)
@ -847,7 +853,7 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf)
{
int ret;
compat_ulong_t tls;
compat_ulong_t tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)


@ -469,16 +469,33 @@ const char *esr_get_class_string(u32 esr)
}
/*
* bad_mode handles the impossible case in the exception vector.
* bad_mode handles the impossible case in the exception vector. This is always
* fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
handler[reason], esr, esr_get_class_string(esr));
die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
}
/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
* exceptions taken from EL0. Unlike bad_mode, this returns.
*/
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
handler[reason], esr, esr_get_class_string(esr));
pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
@ -486,7 +503,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
arm64_notify_die("Oops - bad mode", regs, &info, 0);
current->thread.fault_address = 0;
current->thread.fault_code = 0;
force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)


@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
if (!of_match_node(matches, child))
continue;
if (bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node))
dev = bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node);
if (dev) {
put_device(dev);
continue;
}
ret = ibmebus_create_device(child);
if (ret) {
@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
char *path;
ssize_t rc = 0;
@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
if (!path)
return -ENOMEM;
if (bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path)) {
dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path);
if (dev) {
put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
put_device(dev);
kfree(path);
return count;


@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};


@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;
if (get_option(&arg, &bit) && bit < NCAPINTS*32)
if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
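For context, get_option() parses a signed integer, so a boot parameter such as clearcpuid=-1 previously reached the capability bitmap with a negative index; the added 'bit >= 0' closes that hole. A hedged sketch of the bounds check (the NCAPINTS value here is illustrative):

    #include <stdio.h>

    #define NCAPINTS 19   /* illustrative; the real value is arch-specific */

    /* Clear one capability bit, rejecting out-of-range indexes the same
     * way the fixed check does. */
    static int clear_cpu_cap_checked(unsigned int *caps, int bit)
    {
        if (bit < 0 || bit >= NCAPINTS * 32)   /* both bounds are required */
            return -1;
        caps[bit / 32] &= ~(1u << (bit % 32));
        return 0;
    }

    int main(void)
    {
        unsigned int caps[NCAPINTS];
        for (int i = 0; i < NCAPINTS; i++)
            caps[i] = ~0u;
        printf("%d\n", clear_cpu_cap_checked(caps, -1));    /* rejected: -1 */
        printf("%d\n", clear_cpu_cap_checked(caps, 100));   /* cleared:  0 */
        return 0;
    }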


@ -180,7 +180,8 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
GLOBAL(ftrace_stub)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
retq
END(ftrace_caller)


@ -172,6 +172,7 @@
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@ -434,6 +435,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
/*
* XXX: inoutclob user must know where the argument is being expanded.
* Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
int _fault = 0; \
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
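The asm_safe() macro above wraps a single instruction with an exception-table fixup, so a faulting instruction yields X86EMUL_UNHANDLEABLE instead of crashing the host. As used later in this same diff, in em_fxsave():

    ctxt->ops->get_fpu(ctxt);
    rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
    ctxt->ops->put_fpu(ctxt);
    if (rc != X86EMUL_CONTINUE)
            return rc;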
@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check.
* subject to the same check. FXSAVE and FXRSTOR are checked here too as their
* 512 bytes of data must be aligned to a 16 byte boundary.
*/
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
return 1;
if (ctxt->d & Aligned)
return true;
return size;
else if (ctxt->d & Unaligned)
return false;
return 1;
else if (ctxt->d & Avx)
return false;
return 1;
else if (ctxt->d & Aligned16)
return 16;
else
return true;
return size;
}
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
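A quick worked example of the new check: insn_alignment() returns the required alignment (16 for FXSAVE/FXRSTOR, the operand size for Aligned, 1 when no check applies), and 'la & (align - 1)' is nonzero exactly when the linear address violates it:

    #include <stdio.h>

    int main(void)
    {
        unsigned long la = 0x1008;            /* 8-byte aligned, not 16 */
        printf("%#lx\n", la & (16UL - 1));    /* 0x8 -> misaligned, #GP   */
        printf("%#lx\n", la & (1UL - 1));     /* 0   -> no check, proceed */
        return 0;
    }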
@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned int size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
/* NULL selector is not valid for TR, CS and (except for long mode) SS */
if (null_selector) {
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
goto exception;
if (seg == VCPU_SREG_SS) {
if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
goto exception;
/*
* ctxt->ops->set_segment expects the CPL to be in
* SS.DPL, so fake an expand-up 32-bit data segment.
*/
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = cpl;
seg_desc.d = 1;
seg_desc.g = 1;
}
/* Skip all following checks */
goto load;
}
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
/*
* None of MOV, POP and LSS can load a NULL selector in CPL=3, but
* they can load it at CPL<3 (Intel's manual says only LSS can,
* but it's wrong).
*
* However, the Intel manual says that putting IST=1/DPL=3 in
* an interrupt gate will result in SS=3 (the AMD manual instead
* says it doesn't), so allow SS=3 in __load_segment_descriptor
* and only forbid it here.
*/
if (seg == VCPU_SREG_SS && selector == 3 &&
ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_exception(ctxt, GP_VECTOR, 0, true);
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@ -3646,7 +3712,7 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
return segmented_write_std(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
u32 eax = 1, ebx, ecx = 0, edx;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(edx & FFL(FXSR)))
return emulate_ud(ctxt);
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
/*
* Don't emulate a case that should never be hit, instead of working
* around a lack of fxsave64/fxrstor64 on old compilers.
*/
if (ctxt->mode >= X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;
return X86EMUL_CONTINUE;
}
/*
* FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
* 1) 16 bit mode
* 2) 32 bit mode
* - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
* preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
* save and restore
* 3) 64-bit mode with REX.W prefix
* - like (2), but XMM 8-15 are being saved and restored
* 4) 64-bit mode without REX.W prefix
* - like (3), but FIP and FDP are 64 bit
*
* Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
* desired result. (4) is not emulated.
*
* Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
* and FPU DS) should match.
*/
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
size_t size;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->get_fpu(ctxt);
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
ctxt->ops->put_fpu(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
else
size = offsetof(struct fxregs_state, xmm_space[0]);
return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
struct fxregs_state *new)
{
int rc = X86EMUL_CONTINUE;
struct fxregs_state old;
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
if (rc != X86EMUL_CONTINUE)
return rc;
/*
* 64 bit host will restore XMM 8-15, which is not correct on non-64
* bit guests. Load the current values in order to preserve 64 bit
* XMMs after fxrstor.
*/
#ifdef CONFIG_X86_64
/* XXX: accessing XMM 8-15 very awkwardly */
memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
#endif
/*
* Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
* does save and restore MXCSR.
*/
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
memcpy(new->xmm_space, old.xmm_space, 8 * 16);
return rc;
}
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
if (fx_state.mxcsr >> 16)
return emulate_gp(ctxt, 0);
ctxt->ops->get_fpu(ctxt);
if (ctxt->mode < X86EMUL_MODE_PROT64)
rc = fxrstor_fixup(ctxt, &fx_state);
if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
ctxt->ops->put_fpu(ctxt);
return rc;
}
static bool valid_cr(int nr)
{
switch (nr) {
@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};
static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
I(ModRM | Aligned16, em_fxsave),
I(ModRM | Aligned16, em_fxrstor),
N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
int rc;
ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);
if (unlikely(fault))
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;


@ -2187,3 +2187,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
void kvm_lapic_exit(void)
{
static_key_deferred_flush(&apic_hw_disabled);
static_key_deferred_flush(&apic_sw_disabled);
}


@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
void kvm_lapic_exit(void);
static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
{


@ -5842,6 +5842,7 @@ out:
void kvm_arch_exit(void)
{
kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))


@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
{
.callback = set_nouse_crs,
.ident = "Supermicro X8DTH",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{


@ -842,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
int cpu = hctx->next_cpu, next_cpu;
int next_cpu;
next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@ -850,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
return cpu;
}
return hctx->next_cpu;


@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
struct cfq_group_data *cgd;
cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
cgd = kzalloc(sizeof(*cgd), gfp);
if (!cgd)
return NULL;
return &cgd->cpd;


@ -847,6 +847,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
} else {
ret = NMI_HANDLED;
}
sev = ghes_severity(ghes->estatus->error_severity);
@ -858,11 +860,10 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
__process_error(ghes);
ghes_clear_estatus(ghes);
ret = NMI_HANDLED;
}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
if (ret == NMI_HANDLED)
irq_work_queue(&ghes_proc_irq_work);
#endif
atomic_dec(&ghes_in_nmi);


@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
{
struct device_node *bridge;
struct device *parent;
int ret;
bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (!bridge)
@ -181,7 +182,11 @@ static int vexpress_config_populate(struct device_node *node)
if (WARN_ON(!parent))
return -ENODEV;
return of_platform_populate(node, NULL, NULL, parent);
ret = of_platform_populate(node, NULL, NULL, parent);
put_device(parent);
return ret;
}
static int __init vexpress_config_init(void)


@ -482,6 +482,7 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}


@ -373,8 +373,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
if (!throttled)
if (!throttled) {
/* we don't want to be preempted while
* checking if the CPU frequency has been throttled
*/
preempt_disable();
powernv_cpufreq_throttle_check(NULL);
preempt_enable();
}
freq_data.pstate_id = powernv_freqs[new_index].driver_data;


@ -445,6 +445,9 @@ struct dma_pl330_chan {
/* for cyclic capability */
bool cyclic;
/* for runtime pm tracking */
bool active;
};
struct pl330_dmac {
@ -1994,6 +1997,7 @@ static void pl330_tasklet(unsigned long data)
_stop(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = true;
pch->active = false;
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
@ -2015,6 +2019,7 @@ static void pl330_tasklet(unsigned long data)
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
@ -2129,6 +2134,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
@ -2139,6 +2145,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
power_down = pch->active;
pch->active = false;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@ -2156,6 +2164,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
if (power_down)
pm_runtime_put_autosuspend(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0;
@ -2302,6 +2312,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
* updated on work_list emptiness status.
*/
WARN_ON(list_empty(&pch->submitted_list));
pch->active = true;
pm_runtime_get_sync(pch->dmac->ddma.dev);
}
list_splice_tail_init(&pch->submitted_list, &pch->work_list);


@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
} else if (rdev->family == CHIP_VERDE) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6820) ||
(rdev->pdev->device == 0x6821) ||
(rdev->pdev->device == 0x6822) ||
(rdev->pdev->device == 0x6823) ||
(rdev->pdev->device == 0x682A) ||
(rdev->pdev->device == 0x682B)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||


@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
char data[8];
char *data;
data = kmalloc(8, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
if (ret < 5) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
return -EIO;
ret = -EIO;
goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
return -EIO;
ret = -EIO;
goto out;
}
return brightness;
ret = brightness;
out:
kfree(data);
return ret;
}
static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
const char *macro_mode;
char data[8];
char *data;
data = kmalloc(2, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_GET_MODE,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 2,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
if (ret < 1) {
dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
ret);
return -EIO;
ret = -EIO;
goto out;
}
switch (data[0]) {
@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
default:
dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
data[0]);
return -EIO;
ret = -EIO;
goto out;
}
return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
out:
kfree(data);
return ret;
}
static ssize_t k90_store_macro_mode(struct device *dev,
@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int current_profile;
char data[8];
char *data;
data = kmalloc(8, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
if (ret < 8) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
return -EIO;
ret = -EIO;
goto out;
}
current_profile = data[7];
if (current_profile < 1 || current_profile > 3) {
dev_warn(dev, "Read invalid current profile: %02hhx.\n",
data[7]);
return -EIO;
ret = -EIO;
goto out;
}
return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
out:
kfree(data);
return ret;
}
static ssize_t k90_store_current_profile(struct device *dev,
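All three hunks above apply the same rule: a buffer handed to usb_control_msg() may be DMA-mapped, so it must live on the heap rather than the stack, and a positive return value only says how many bytes actually arrived, so short reads are errors too. A condensed kernel-style sketch of the pattern (read_status_byte() is a hypothetical helper, not part of the driver):

    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Hypothetical helper showing the heap-buffer pattern used above. */
    static int read_status_byte(struct usb_device *udev, u8 request, u8 *out)
    {
            char *buf;
            int ret;

            buf = kmalloc(8, GFP_KERNEL);   /* heap, never a stack array */
            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                                  USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                  0, 0, buf, 8, USB_CTRL_SET_TIMEOUT);
            if (ret < 5) {                  /* short reads are errors too */
                    ret = ret < 0 ? ret : -EIO;
            } else {
                    *out = buf[4];
                    ret = 0;
            }

            kfree(buf);
            return ret;
    }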


@ -1400,7 +1400,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
info.addr, node->full_name);
addr, node->full_name);
return ERR_PTR(-EINVAL);
}


@ -329,7 +329,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
union i2c_smbus_data temp;
union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
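The '= {}' initializer closes a potential information leak: for some SMBus transactions only part of the union is written by the kernel before the whole object is copied back to userspace, so leftover stack bytes could escape. A userspace illustration of aggregate zero-initialization (using the portable '= {0}' spelling):

    #include <stdio.h>
    #include <string.h>

    union data { unsigned char block[34]; unsigned short word; };

    int main(void)
    {
        union data stale;
        memset(&stale, 0xAA, sizeof(stale));   /* simulate old stack bytes */

        union data clean = {0};                /* every byte is zeroed */
        printf("stale=%#x clean=%#x\n", stale.block[20], clean.block[20]);
        return 0;
    }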


@ -113,7 +113,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
!(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
--ah->av.eth.stat_rate;
}
ah->av.eth.sl_tclass_flowlabel |=
cpu_to_be32((ah_attr->grh.traffic_class << 20) |
ah_attr->grh.flow_label);
/*
* HW requires multicast LID so we just choose one.
*/
@ -121,7 +123,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}


@ -630,9 +630,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
if (err)
goto out;
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
(((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
IB_SPEED_FDR : IB_SPEED_QDR;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
@ -2401,14 +2403,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_steer_qp_release;
}
bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
bitmap_zero(ibdev->ib_uc_qpns_bitmap,
ibdev->steer_qpn_count);
err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_base +
ibdev->steer_qpn_count - 1);
if (err)
goto err_steer_free_bitmap;
} else {
bitmap_fill(ibdev->ib_uc_qpns_bitmap,
ibdev->steer_qpn_count);
}
}
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)


@ -1207,7 +1207,8 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
if (dev->qp1_proxy[mqp->port - 1] == mqp) {
if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
dev->qp1_proxy[mqp->port - 1] == mqp) {
mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
dev->qp1_proxy[mqp->port - 1] = NULL;
mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);


@ -614,6 +614,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int total = 0;
int i;
int j;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
for (j = 0 ; j < 1000; j++) {
if (!ent->pending)
break;
msleep(50);
}
}
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
total += ent->pending;
}
if (total)
mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
else
mlx5_ib_warn(dev, "done with all pending requests\n");
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@ -627,6 +654,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;
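wait_for_async_commands() above is a bounded poll: up to 1000 iterations of msleep(50) per cache entry, after which cleanup proceeds anyway and the leftover count is logged. The same shape in a minimal, runnable form (the pending counter is simulated):

    #include <stdio.h>
    #include <unistd.h>

    static int pending = 3;   /* hypothetical outstanding-work counter */

    int main(void)
    {
        for (int j = 0; j < 1000 && pending; j++) {
            usleep(50 * 1000);   /* msleep(50) equivalent */
            pending--;           /* pretend work completes over time */
        }
        if (pending)
            printf("aborted with %d requests pending\n", pending);
        else
            printf("done with all pending requests\n");
        return 0;
    }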


@ -1035,8 +1035,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
priv->ca->name);
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}


@ -1238,6 +1238,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_dev->name = xpad->name;
input_dev->phys = xpad->phys;
usb_to_input_id(xpad->udev, &input_dev->id);
if (xpad->xtype == XTYPE_XBOX360W) {
/* x360w controllers and the receiver have different ids */
input_dev->id.product = 0x02a1;
}
input_dev->dev.parent = &xpad->intf->dev;
input_set_drvdata(input_dev, xpad);


@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
},
},
{ }
};


@ -905,9 +905,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
if (report_count > 3) {
if (report_count == 0 || report_count > 3) {
dev_err(&client->dev,
"too large report count: %*ph\n",
"bad report count: %*ph\n",
HEADER_SIZE, ts->buf);
break;
}


@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
if (params->dlen > 24 || params->dlen <= 0)
return -EINVAL;
pctrl = devm_pinctrl_get(ppi->dev);
if (IS_ERR(pctrl))
return PTR_ERR(pctrl);
pstate = pinctrl_lookup_state(pctrl,
pin_state[(params->dlen + 7) / 8 - 1]);
if (pinctrl_select_state(pctrl, pstate))


@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
if (allowance > ITE_RXDCR_MAX)
allowance = ITE_RXDCR_MAX;
use_demodulator = true;
}
}


@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
if (cmd->opcode == MMC_STOP_TRANSMISSION)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
(cmd->opcode == SD_IO_RW_EXTENDED))
if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;


@ -531,7 +531,7 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
tristate "Support for NAND on Lantiq XWAY SoC"
bool "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
select MTD_NAND_PLATFORM
help


@ -110,13 +110,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
uint8_t *buffer;
uint8_t value;
buffer = kmalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
0, reg, &value, 1, 1000);
return ret >= 0 ? value : ret;
0, reg, buffer, 1, 1000);
if (ret >= 0) {
value = buffer[0];
kfree(buffer);
return value;
} else {
kfree(buffer);
return ret;
}
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@ -517,9 +530,13 @@ static struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
unsigned char buffer[3];
unsigned char *buffer;
int ret;
buffer = kmalloc(3, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@ -535,15 +552,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char build[ATUSB_BUILD_SIZE + 1];
char *build;
int ret;
build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
if (!build)
return -ENOMEM;
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@ -552,6 +574,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
kfree(build);
return ret;
}


@ -1019,6 +1019,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
@ -1026,13 +1027,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
/*
* A Root Port is always the upstream end of a Link. No PCIe
* component has two Links. Two Links are connected by a Switch
* that has a Port on each Link and internal logic to connect the
* two Ports.
* A Root Port or a PCI-to-PCIe bridge is always the upstream end
* of a Link. No PCIe component has two Links. Two Links are
* connected by a Switch that has a Port on each Link and internal
* logic to connect the two Ports.
*/
type = pci_pcie_type(pdev);
if (type == PCI_EXP_TYPE_ROOT_PORT)
if (type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_PCIE_BRIDGE)
pdev->has_secondary_link = 1;
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {


@ -246,7 +246,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
meson_pmx_disable_other_groups(pc, offset, -1);
return 0;
}


@ -483,7 +483,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
return true;
return pin->configs &
(SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
case PIN_CONFIG_BIAS_PULL_UP:
return pin->configs & SH_PFC_PIN_CFG_PULL_UP;


@ -3365,7 +3365,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct6_dsd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ctx_cachep)
goto fail_free_gid_list;
goto fail_free_srb_mempool;
}
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
ctx_cachep);
@ -3518,7 +3518,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
GFP_KERNEL);
if (!ha->loop_id_map)
goto fail_async_pd;
goto fail_loop_id_map;
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@ -3527,6 +3527,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return 0;
fail_loop_id_map:
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
@ -3554,6 +3556,10 @@ fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
if (ha->sns_cmd)
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@ -3571,9 +3577,11 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
if (ha->ctx_mempool)
mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:


@ -470,6 +470,14 @@ static void atmel_stop_tx(struct uart_port *port)
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
/*
* Disable the transmitter.
* This is mandatory when DMA is used, otherwise the DMA buffer
* is fully transmitted.
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@ -502,6 +510,9 @@ static void atmel_start_tx(struct uart_port *port)
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
/* re-enable the transmitter */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}
/*


@ -939,8 +939,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
.evbit = { BIT_MASK(EV_KEY) },
.keybit = { BIT_MASK(KEY_LEFTALT) },
.evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
.keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
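This fix matters on 32-bit builds: KEY_LEFTALT is 56, so its bit belongs in word 1 of a bitmap of 32-bit longs, at position 56 % 32 = 24. Without the designated [BIT_WORD(...)] index the initializer lands in word 0 and the match never fires. A minimal sketch assuming 32-bit words:

    #include <stdio.h>

    #define BITS_PER_LONG 32   /* the broken case; 64-bit longs masked it */
    #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
    #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
    #define KEY_LEFTALT  56

    int main(void)
    {
        unsigned long wrong[2] = { BIT_MASK(KEY_LEFTALT) };
        unsigned long right[2] = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) };

        printf("wrong: %#lx %#lx\n", wrong[0], wrong[1]);   /* 0x1000000 0 */
        printf("right: %#lx %#lx\n", right[0], right[1]);   /* 0 0x1000000 */
        return 0;
    }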


@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_REMOVING) {
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but another timer marked "
"xHCI as DYING, exiting.");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but no command pending, "


@ -1569,19 +1569,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(urb_priv);
return ret;
}
if ((xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Ep 0x%x: URB %p to be canceled on "
"non-responsive xHCI host.",
urb->ep->desc.bEndpointAddress, urb);
/* Let the stop endpoint command watchdog timer (which set this
* state) finish cleaning up the endpoint TD lists. We must
* have caught it in the middle of dropping a lock and giving
* back an URB.
*/
goto done;
}
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];


@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
if (r < 0)
dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
if (r < bufsize) {
if (r >= 0) {
dev_err(&dev->dev,
"short control message received (%d < %u)\n",
r, bufsize);
r = -EIO;
}
dev_err(&dev->dev, "failed to receive control message: %d\n",
r);
return r;
}
return 0;
}
static int ch341_set_baudrate(struct usb_device *dev,
@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
char *buffer;
int r;
const unsigned size = 8;
unsigned long flags;
buffer = kmalloc(size, GFP_KERNEL);
@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
/* setup the private status if available */
if (r == 2) {
r = 0;
spin_lock_irqsave(&priv->lock, flags);
priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
spin_unlock_irqrestore(&priv->lock, flags);
} else
r = -EPROTO;
out: kfree(buffer);
return r;
@ -188,9 +198,9 @@ out: kfree(buffer);
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
char *buffer;
int r;
const unsigned size = 8;
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(port->serial->dev, priv);
if (r < 0)
@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
r = ch341_configure(serial->dev, priv);
if (r)
goto out;
return r;
if (tty)
ch341_set_termios(tty, port, NULL);
@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
goto out;
return r;
}
r = usb_serial_generic_open(tty, port);
if (r)
goto err_kill_interrupt_urb;
out: return r;
return 0;
err_kill_interrupt_urb:
usb_kill_urb(port->interrupt_in_urb);
return r;
}
/* Old_termios contains the original termios settings and
@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty,
baud_rate = tty_get_baud_rate(tty);
priv->baud_rate = baud_rate;
if (baud_rate) {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
priv->baud_rate = baud_rate;
ch341_set_baudrate(port->serial->dev, priv);
} else {
spin_lock_irqsave(&priv->lock, flags);
priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
}
ch341_set_handshake(port->serial->dev, priv->line_control);
/* Unimplemented:
* (cflag & CSIZE) : data bits [5, 8]
* (cflag & PARENB) : parity {NONE, EVEN, ODD}
* (cflag & CSTOPB) : stop bits [1, 2]
*/
spin_lock_irqsave(&priv->lock, flags);
if (C_BAUD(tty) == B0)
priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->line_control);
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@ -539,14 +554,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
struct ch341_private *priv;
priv = usb_get_serial_port_data(serial->port[0]);
struct usb_serial_port *port = serial->port[0];
struct ch341_private *priv = usb_get_serial_port_data(port);
int ret;
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
return 0;
if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (ret) {
dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
ret);
return ret;
}
}
return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver ch341_device = {


@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
status_buf, KLSI_STATUSBUF_LEN,
10000
);
if (rc < 0)
dev_err(&port->dev, "Reading line status failed (error = %d)\n",
rc);
else {
if (rc != KLSI_STATUSBUF_LEN) {
dev_err(&port->dev, "reading line status failed: %d\n", rc);
if (rc >= 0)
rc = -EIO;
} else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x\n",


@ -467,7 +467,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
*pci_base = (dma_addr_t)vme_base + pci_offset;
*pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
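The one-character fix above (vme_base becomes *vme_base) is the difference between the value the caller passed in and the address of the variable holding it; the dma_addr_t cast had silenced the warning that would have caught this. An illustrative sketch:

    #include <stdio.h>

    static void slave_get(unsigned long long *vme_base,
                          unsigned long long *pci_base)
    {
        unsigned long long pci_offset = 0x1000;
        /* Buggy form: (unsigned long long)vme_base + pci_offset would add
         * the offset to the pointer's own address. */
        *pci_base = *vme_base + pci_offset;   /* value, not pointer */
    }

    int main(void)
    {
        unsigned long long vme = 0x20000000, pci;
        slave_get(&vme, &pci);
        printf("%#llx\n", pci);   /* 0x20001000 */
        return 0;
    }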


@ -2520,11 +2520,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
btrfs_delayed_ref_unlock(locked_ref);
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@ -2570,7 +2570,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
btrfs_delayed_ref_unlock(locked_ref);
return ret;


@ -274,12 +274,13 @@ static int parse_reply_info_extra(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
u32 op = le32_to_cpu(info->head->op);
if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else if (info->head->op == CEPH_MDS_OP_READDIR ||
info->head->op == CEPH_MDS_OP_LSSNAP)
else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
return parse_reply_info_dir(p, end, info, features);
else if (info->head->op == CEPH_MDS_OP_CREATE)
else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features);
else
return -EIO;
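info->head->op is a little-endian on-wire field, so comparing it directly against host-order constants only works on little-endian machines; converting once with le32_to_cpu() makes the dispatch endian-safe. A small sketch of what goes wrong on a big-endian host (the op value 8 is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swap32(uint32_t v)   /* what le32_to_cpu() does on BE */
    {
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v & 0xff00) << 8) | (v << 24);
    }

    int main(void)
    {
        /* An op of 8 travels as LE bytes 08 00 00 00; a big-endian host
         * loading them raw sees 0x08000000. */
        uint32_t raw = 0x08000000;
        printf("%d %d\n", raw == 8, swap32(raw) == 8);   /* 0 1 */
        return 0;
    }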


@ -1322,9 +1322,12 @@ int d_set_mounted(struct dentry *dentry)
}
spin_lock(&dentry->d_lock);
if (!d_unlinked(dentry)) {
ret = -EBUSY;
if (!d_mountpoint(dentry)) {
dentry->d_flags |= DCACHE_MOUNTED;
ret = 0;
}
}
spin_unlock(&dentry->d_lock);
out:
write_sequnlock(&rename_lock);


@ -2091,7 +2091,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
clear_bit(FR_PENDING, &req->flags);
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
request_end(fc, req);
@ -2169,6 +2168,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
list_splice_init(&fiq->pending, &to_end2);
list_for_each_entry(req, &to_end2, list)
clear_bit(FR_PENDING, &req->flags);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);


@ -743,26 +743,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
return NULL;
}
static struct mountpoint *new_mountpoint(struct dentry *dentry)
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
struct mountpoint *mp, *new = NULL;
int ret;
mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
if (!mp)
return ERR_PTR(-ENOMEM);
ret = d_set_mounted(dentry);
if (ret) {
kfree(mp);
return ERR_PTR(ret);
if (d_mountpoint(dentry)) {
mountpoint:
read_seqlock_excl(&mount_lock);
mp = lookup_mountpoint(dentry);
read_sequnlock_excl(&mount_lock);
if (mp)
goto done;
}
mp->m_dentry = dentry;
mp->m_count = 1;
hlist_add_head(&mp->m_hash, chain);
INIT_HLIST_HEAD(&mp->m_list);
if (!new)
new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
/* Exactly one processes may set d_mounted */
ret = d_set_mounted(dentry);
/* Someone else set d_mounted? */
if (ret == -EBUSY)
goto mountpoint;
/* The dentry is not available as a mountpoint? */
mp = ERR_PTR(ret);
if (ret)
goto done;
/* Add the new mountpoint to the hash table */
read_seqlock_excl(&mount_lock);
new->m_dentry = dentry;
new->m_count = 1;
hlist_add_head(&new->m_hash, mp_hash(dentry));
INIT_HLIST_HEAD(&new->m_list);
read_sequnlock_excl(&mount_lock);
mp = new;
new = NULL;
done:
kfree(new);
return mp;
}
@ -1557,11 +1581,11 @@ void __detach_mounts(struct dentry *dentry)
struct mount *mnt;
namespace_lock();
lock_mount_hash();
mp = lookup_mountpoint(dentry);
if (IS_ERR_OR_NULL(mp))
goto out_unlock;
lock_mount_hash();
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@ -1571,9 +1595,9 @@ void __detach_mounts(struct dentry *dentry)
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
unlock_mount_hash();
put_mountpoint(mp);
out_unlock:
unlock_mount_hash();
namespace_unlock();
}
@ -1962,9 +1986,7 @@ retry:
namespace_lock();
mnt = lookup_mnt(path);
if (likely(!mnt)) {
struct mountpoint *mp = lookup_mountpoint(dentry);
if (!mp)
mp = new_mountpoint(dentry);
struct mountpoint *mp = get_mountpoint(dentry);
if (IS_ERR(mp)) {
namespace_unlock();
mutex_unlock(&dentry->d_inode->i_mutex);
@ -1983,7 +2005,11 @@ retry:
static void unlock_mount(struct mountpoint *where)
{
struct dentry *dentry = where->m_dentry;
read_seqlock_excl(&mount_lock);
put_mountpoint(where);
read_sequnlock_excl(&mount_lock);
namespace_unlock();
mutex_unlock(&dentry->d_inode->i_mutex);
}
@ -3055,9 +3081,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
put_mountpoint(root_mp);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
put_mountpoint(root_mp);
error = 0;
out4:
unlock_mount(old_mp);
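The get_mountpoint() rework above is a lookup-or-create loop: d_set_mounted() now reports -EBUSY when another task has already claimed the dentry, and the caller reacts by retrying the lookup rather than failing. A schematic fragment of that shape (lookup(), claim() and install() are hypothetical stand-ins, not the real helpers):

    for (;;) {
            obj = lookup(key);           /* another task may have made it */
            if (obj)
                    return obj;
            ret = claim(key);            /* exactly one claimant succeeds */
            if (ret == -EBUSY)
                    continue;            /* lost the race: redo the lookup */
            if (ret)
                    return ERR_PTR(ret); /* dentry unusable as mountpoint */
            return install(key);         /* we won: publish the object */
    }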


@ -462,7 +462,7 @@ void nfs_force_use_readdirplus(struct inode *dir)
{
if (!list_empty(&NFS_I(dir)->open_files)) {
nfs_advise_use_readdirplus(dir);
nfs_zap_mapping(dir, dir->i_mapping);
invalidate_mapping_pages(dir->i_mapping, 0, -1);
}
}
@ -847,17 +847,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
goto out;
}
static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
{
struct nfs_inode *nfsi = NFS_I(dir);
if (nfs_attribute_cache_expired(dir))
return true;
if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
return true;
return false;
}
/* The file offset position represents the dirent entry number. A
last cookie cache takes care of the common case of reading the
whole directory.
@ -890,7 +879,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
nfs_block_sillyrename(dentry);
if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
res = nfs_revalidate_mapping(inode, file->f_mapping);
if (res < 0)
goto out;


@ -283,7 +283,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
out_test_devid:
if (filelayout_test_devid_unavailable(devid))
if (ret->ds_clp == NULL ||
filelayout_test_devid_unavailable(devid))
ret = NULL;
out:
return ret;


@ -1185,13 +1185,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
* i_lock */
spin_lock(&ino->i_lock);
lo = nfsi->layout;
if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
sleep = true;
spin_unlock(&ino->i_lock);
if (sleep)
if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
sleep = true;
}
spin_unlock(&ino->i_lock);
return sleep;
}


@ -3321,6 +3321,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
/*
* On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
* expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
* we can recover correctly from node failure. Otherwise, we may get
* invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
*/
if (!ocfs2_is_o2cb_active() &&
lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lvb = 1;
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;

View file

@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
inline int ocfs2_is_o2cb_active(void)
{
return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
}
EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;

View file

@ -298,4 +298,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
int ocfs2_is_o2cb_active(void);
#endif /* STACKGLUE_H */
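
Taken together, the three ocfs2 hunks let ocfs2_downconvert_lock() ask which cluster stack is active and, when the fs/dlm stack is in use, force DLM_LKF_VALBLK for any lock type that carries an LVB. A condensed sketch of the decision; the flag values are illustrative, and the real test reads lockres->l_ops->flags:

#define LOCK_TYPE_USES_LVB	0x2	/* illustrative value */
#define DLM_LKF_VALBLK		0x4	/* illustrative value */

static unsigned int downconvert_flags(int o2cb_active,
				      unsigned int lock_flags, int lvb)
{
	unsigned int dlm_flags = 0;

	/* fsdlm wants VALBLK whenever the LKB has an LVB; otherwise a
	 * node failure can hand back a stale LVB with no
	 * DLM_SBF_VALNOTVALID warning. */
	if (!o2cb_active && (lock_flags & LOCK_TYPE_USES_LVB))
		lvb = 1;
	if (lvb)
		dlm_flags |= DLM_LKF_VALBLK;
	return dlm_flags;
}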

View file

@ -903,11 +903,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int error;
if (type == ACL_TYPE_ACCESS) {
error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return 0;
if (error == 0)
acl = NULL;
error = posix_acl_update_mode(inode,
&inode->i_mode, &acl);
if (error)
return error;
}
inode->i_ctime = CURRENT_TIME;
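
simple_set_acl() now delegates to posix_acl_update_mode(), which fixes two bugs at once: the old code returned success when posix_acl_equiv_mode() failed, and it never cleared the setgid bit when a non-owner without CAP_FSETID set the ACL. A rough standalone model of what the helper folds together, with the permission check stubbed out (the real logic lives in fs/posix_acl.c):

#include <stddef.h>
#include <sys/stat.h>

/* Stub: 1 if the caller may keep SGID (in owning group or CAP_FSETID). */
static int may_keep_sgid(void) { return 0; }

/* Stub for posix_acl_equiv_mode(): <0 error, 0 if the ACL reduces to
 * plain mode bits, >0 if a real ACL is still needed. */
static int acl_equiv_mode(const void *acl, mode_t *mode)
{
	(void)acl; (void)mode;
	return 1;
}

static int acl_update_mode_model(mode_t *mode_p, const void **acl_p)
{
	mode_t mode = *mode_p;
	int error = acl_equiv_mode(*acl_p, &mode);

	if (error < 0)
		return error;		/* old code wrongly returned 0 here */
	if (error == 0)
		*acl_p = NULL;		/* ACL collapses to mode bits */
	if (!may_keep_sgid())
		mode &= ~S_ISGID;	/* drop setgid, as chmod would */
	*mode_p = mode;
	return 0;
}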

View file

@ -703,7 +703,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
ctl_dir = container_of(head, struct ctl_dir, header);
if (!dir_emit_dots(file, ctx))
return 0;
goto out;
pos = 2;
@ -713,6 +713,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
break;
}
}
out:
sysctl_head_finish(head);
return 0;
}
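
The proc_sysctl fix is the standard single-exit cleanup pattern: returning directly when dir_emit_dots() fails skipped sysctl_head_finish() and leaked the head reference, so the early return becomes a goto to a common exit label. In standalone form:

#include <stdio.h>

static int readdir_model(int emit_dots_failed)
{
	puts("sysctl_head_grab()");	/* resource acquired up front */
	if (emit_dots_failed)
		goto out;		/* was: return 0 (leaked the ref) */
	puts("emit entries");
out:
	puts("sysctl_head_finish()");	/* now runs on every path */
	return 0;
}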

View file

@ -34,6 +34,11 @@
#include <linux/slab.h>
#include "ubifs.h"
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
int len, int lnum, int offs);
static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
struct ubifs_zbranch *zbr, void *node);
/*
* Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
* @NAME_LESS: name corresponding to the first argument is less than second
@ -402,7 +407,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
return 0;
}
if (c->replaying) {
err = fallible_read_node(c, &zbr->key, zbr, node);
/*
* If the node was not found, return -ENOENT; otherwise return 0.
* Negative return codes stay as-is.
*/
if (err == 0)
err = -ENOENT;
else if (err == 1)
err = 0;
} else {
err = ubifs_tnc_read_node(c, zbr, node);
}
if (err)
return err;
@ -2766,6 +2783,10 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
if (nm->name) {
if (err) {
/* Handle collisions */
if (c->replaying)
err = fallible_resolve_collision(c, key, &znode, &n,
nm, 0);
else
err = resolve_collision(c, key, &znode, &n, nm);
dbg_tnc("rc returned %d, znode %p, n %d",
err, znode, n);
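
The tnc.c change routes lookups through fallible_read_node() while the journal is being replayed, since at that point the index may still reference nodes the journal has already obsoleted, and such reads are allowed to fail. The helper uses a three-way return convention that the caller folds back into the usual 0/-errno style; a self-contained sketch of the mapping (illustrative names):

#include <errno.h>

/* Model of fallible_read_node(): 0 = missing, 1 = read OK, <0 = error. */
static int fallible_read_model(int present, int io_error)
{
	if (io_error)
		return -EIO;	/* real I/O errors pass through */
	return present ? 1 : 0;
}

/* Model of the new tnc_read_node_nm() replay branch. */
static int tnc_read_model(int present, int io_error)
{
	int err = fallible_read_model(present, io_error);

	if (err == 0)
		return -ENOENT;	/* "missing" becomes a normal errno */
	if (err == 1)
		return 0;	/* "read OK" becomes success */
	return err;
}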

View file

@ -14,6 +14,7 @@ struct static_key_deferred {
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE();
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)

View file

@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void static_key_deferred_flush(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE();
flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
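
static_key_deferred_flush() gives users of rate-limited static keys a way to wait out a pending deferred decrement before freeing the key; this backport accompanies a KVM fix that flushes pending lapic jump-label updates on module unload. A userspace model of the hazard, with a thread standing in for the delayed work and pthread_join() for flush_delayed_work() (all names illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct deferred_key {
	int enabled;
	pthread_t work;		/* the pending "delayed work" */
	int work_pending;
};

static void *deferred_dec(void *arg)
{
	struct deferred_key *key = arg;

	usleep(100 * 1000);	/* the rate-limit delay */
	key->enabled--;		/* use-after-free if key was freed */
	return NULL;
}

static void key_slow_dec_deferred(struct deferred_key *key)
{
	key->work_pending = !pthread_create(&key->work, NULL,
					    deferred_dec, key);
}

static void key_deferred_flush(struct deferred_key *key)
{
	if (key->work_pending)
		pthread_join(key->work, NULL);	/* flush_delayed_work() */
	key->work_pending = 0;
}

int main(void)
{
	struct deferred_key *key = calloc(1, sizeof(*key));

	key->enabled = 1;
	key_slow_dec_deferred(key);
	key_deferred_flush(key);	/* without this, free() races the worker */
	free(key);
	return 0;
}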

View file

@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res)
struct page_map *page_map = res;
/* pages are dead and unused, undo the arch mapping */
mem_hotplug_begin();
arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
mem_hotplug_done();
}
void *devm_memremap_pages(struct device *dev, struct resource *res)
@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
if (nid < 0)
nid = numa_mem_id();
mem_hotplug_begin();
error = arch_add_memory(nid, res->start, resource_size(res), true);
mem_hotplug_done();
if (error) {
devres_free(page_map);
return ERR_PTR(error);
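
The memremap.c change is purely a bracketing fix: arch_add_memory() and arch_remove_memory() must run under the memory-hotplug lock, which these devm paths previously skipped, so each call is now wrapped in mem_hotplug_begin()/mem_hotplug_done(). The shape of the pattern, modeled with a mutex:

#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static void hotplug_begin(void) { pthread_mutex_lock(&hotplug_lock); }
static void hotplug_done(void)  { pthread_mutex_unlock(&hotplug_lock); }

static int add_memory_model(void)
{
	int error;

	hotplug_begin();
	error = 0;		/* arch_add_memory() would run here */
	hotplug_done();
	return error;
}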

View file

@ -1723,23 +1723,32 @@ free:
}
/*
* When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were
* never used.
* Called with hugetlb_lock held.
* This routine has two main purposes:
* 1) Decrement the reservation count (resv_huge_pages) by the value passed
* in unused_resv_pages. This corresponds to the prior adjustments made
* to the associated reservation map.
* 2) Free any unused surplus pages that may have been allocated to satisfy
* the reservation. As many as unused_resv_pages may be freed.
*
* Called with hugetlb_lock held. However, the lock could be dropped (and
* reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
* we must make sure nobody else can claim pages we are in the process of
* freeing. Do this by ensuring resv_huge_pages is always greater than the
* number of huge pages we plan to free when dropping the lock.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
/* Cannot return gigantic pages currently */
if (hstate_is_gigantic(h))
return;
goto out;
/*
* Part (or even all) of the reservation could have been backed
* by pre-allocated pages. Only free surplus pages.
*/
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
@ -1749,12 +1758,22 @@ static void return_unused_surplus_pages(struct hstate *h,
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
*
* Note that we decrement resv_huge_pages as we free the pages. If
* we drop the lock, resv_huge_pages will still be sufficiently large
* to cover subsequent pages we may free.
*/
while (nr_pages--) {
h->resv_huge_pages--;
unused_resv_pages--;
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
break;
goto out;
cond_resched_lock(&hugetlb_lock);
}
out:
/* Fully uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
}
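
The hugetlb rewrite changes when the reservation counter is uncommitted: rather than subtracting unused_resv_pages up front, the loop now decrements resv_huge_pages one page at a time, so that whenever cond_resched_lock() drops hugetlb_lock the counter still covers every page not yet freed. A compact model of the invariant (plain counters, illustrative names):

/* Invariant while the lock may be dropped: *resv >= pages left to free. */
static void return_surplus_model(long *resv, long unused, long surplus)
{
	long nr_pages = unused < surplus ? unused : surplus;

	while (nr_pages--) {
		(*resv)--;	/* uncommit only what this iteration frees */
		unused--;
		/* free one surplus page; the lock may be dropped here,
		 * but *resv still bounds what others can claim */
	}
	*resv -= unused;	/* fully uncommit the remainder at the end */
}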

View file

@ -1481,7 +1481,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
case RPC_GSS_PROC_DESTROY:
if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
goto auth_err;
rsci->h.expiry_time = get_seconds();
rsci->h.expiry_time = seconds_since_boot();
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
if (resv->iov_len + 4 > PAGE_SIZE)
goto drop;
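
The svcauth_gss one-liner is a time-base fix: sunrpc cache entries are expired against seconds_since_boot(), so stamping a destroyed context with wall-clock get_seconds() left it looking valid decades into the future instead of expiring at once. The userspace analogue of the rule is to never mix clocks in deadline arithmetic:

#include <time.h>

/* Boot-relative seconds, the same base the expiry check compares with. */
static long since_boot_model(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec;
}

/* Correct: "now" and the deadline come from one clock. Substituting
 * time(NULL) here would reproduce the bug fixed above. */
static long expiry_in(long seconds)
{
	return since_boot_model() + seconds;
}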

View file

@ -346,8 +346,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
atomic_inc(&rdma_stat_read);
return ret;
err:
ib_dma_unmap_sg(xprt->sc_cm_id->device,
frmr->sg, frmr->sg_nents, frmr->direction);
svc_rdma_put_context(ctxt, 0);
svc_rdma_put_frmr(xprt, frmr);
return ret;

View file

@ -13168,13 +13168,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
bool schedule_destroy_work = false;
bool schedule_scan_stop = false;
struct cfg80211_sched_scan_request *sched_scan_req =
rcu_dereference(rdev->sched_scan_req);
if (sched_scan_req && notify->portid &&
sched_scan_req->owner_nlportid == notify->portid)
schedule_scan_stop = true;
sched_scan_req->owner_nlportid == notify->portid) {
sched_scan_req->owner_nlportid = 0;
if (rdev->ops->sched_scan_stop &&
rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
schedule_work(&rdev->sched_scan_stop_wk);
}
list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
cfg80211_mlme_unregister_socket(wdev, notify->portid);
@ -13205,12 +13209,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
spin_unlock(&rdev->destroy_list_lock);
schedule_work(&rdev->destroy_work);
}
} else if (schedule_scan_stop) {
sched_scan_req->owner_nlportid = 0;
if (rdev->ops->sched_scan_stop &&
rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
schedule_work(&rdev->sched_scan_stop_wk);
}
}

View file

@ -95,7 +95,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
if (err)
die("error registering py script extension");
scripting_context = malloc(sizeof(struct scripting_context));
if (scripting_context == NULL)
scripting_context = malloc(sizeof(*scripting_context));
}
#ifdef NO_LIBPYTHON
@ -159,7 +160,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
if (err)
die("error registering pl script extension");
scripting_context = malloc(sizeof(struct scripting_context));
if (scripting_context == NULL)
scripting_context = malloc(sizeof(*scripting_context));
}
#ifdef NO_LIBPERL
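
Both perf hunks fix the same leak: register_python_scripting() and register_perl_scripting() share the global scripting_context, and allocating it unconditionally lost the first allocation when both registered. The guard in isolation, also switching to the more robust sizeof(*ptr) idiom:

#include <stdlib.h>

struct context_model { void *event_data; };	/* illustrative type */

static struct context_model *scripting_context;

static void register_scripting_model(void)
{
	if (scripting_context == NULL)
		scripting_context = malloc(sizeof(*scripting_context));
}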

View file

@ -83,7 +83,7 @@ ifdef INSTALL_PATH
done;
@# Ask all targets to emit their test scripts
echo "#!/bin/bash" > $(ALL_SCRIPT)
echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)

View file

@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
echo "--------------------"
echo "running socket test"

View file

@ -66,7 +66,7 @@ int pmc56_overflow(void)
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
mtspr(SPRN_PMC5, 0);
mtspr(SPRN_PMC6, 0);

View file

@ -188,7 +188,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
if (tmp->token == consumer->token) {
if (tmp->token == consumer->token || tmp == consumer) {
mutex_unlock(&lock);
module_put(THIS_MODULE);
return -EBUSY;
@ -235,7 +235,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
if (tmp->token != consumer->token)
if (tmp != consumer)
continue;
list_for_each_entry(producer, &producers, node) {