Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
commit b4bbeeb816
45 changed files with 490 additions and 194 deletions

Makefile (2 changes)
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 43
+SUBLEVEL = 44
 EXTRAVERSION =
 NAME = Blurry Fish Butt
arch/powerpc/kernel/ibmebus.c

@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
 static int ibmebus_create_devices(const struct of_device_id *matches)
 {
 	struct device_node *root, *child;
+	struct device *dev;
 	int ret = 0;
 
 	root = of_find_node_by_path("/");
@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
 		if (!of_match_node(matches, child))
 			continue;
 
-		if (bus_find_device(&ibmebus_bus_type, NULL, child,
-				    ibmebus_match_node))
+		dev = bus_find_device(&ibmebus_bus_type, NULL, child,
+				      ibmebus_match_node);
+		if (dev) {
+			put_device(dev);
 			continue;
+		}
 
 		ret = ibmebus_create_device(child);
 		if (ret) {
@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
 				   const char *buf, size_t count)
 {
 	struct device_node *dn = NULL;
+	struct device *dev;
 	char *path;
 	ssize_t rc = 0;
 
@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
 	if (!path)
 		return -ENOMEM;
 
-	if (bus_find_device(&ibmebus_bus_type, NULL, path,
-			    ibmebus_match_path)) {
+	dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+			      ibmebus_match_path);
+	if (dev) {
+		put_device(dev);
 		printk(KERN_WARNING "%s: %s has already been probed\n",
 		       __func__, path);
 		rc = -EEXIST;
@@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
 	if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
 				   ibmebus_match_path))) {
 		of_device_unregister(to_platform_device(dev));
+		put_device(dev);
 
 		kfree(path);
 		return count;
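Both fixes above apply the standard driver-core reference rule: bus_find_device() returns its match with an elevated reference count, so every successful lookup must be balanced by put_device(). A minimal fragment illustrating the idiom, using only names that appear in the hunks above (a sketch, not additional code from this commit):

	struct device *dev;

	/* bus_find_device() takes a reference on any device it returns */
	dev = bus_find_device(&ibmebus_bus_type, NULL, child, ibmebus_match_node);
	if (dev) {
		put_device(dev);	/* balance the lookup's reference */
		continue;		/* device already registered; skip creation */
	}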
arch/x86/kernel/cpu/common.c

@@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
 
-	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+	if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
arch/x86/kvm/emulate.c

@@ -172,6 +172,7 @@
 #define NearBranch  ((u64)1 << 52)  /* Near branches */
 #define No16	    ((u64)1 << 53)  /* No 16 bit operand */
 #define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
+#define Aligned16   ((u64)1 << 55)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 
 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 
@@ -434,6 +435,26 @@ FOP_END;
 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
 FOP_END;
 
+/*
+ * XXX: inoutclob user must know where the argument is being expanded.
+ *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ */
+#define asm_safe(insn, inoutclob...) \
+({ \
+	int _fault = 0; \
+ \
+	asm volatile("1:" insn "\n" \
+	             "2:\n" \
+	             ".pushsection .fixup, \"ax\"\n" \
+	             "3: movl $1, %[_fault]\n" \
+	             "   jmp 2b\n" \
+	             ".popsection\n" \
+	             _ASM_EXTABLE(1b, 3b) \
+	             : [_fault] "+qm"(_fault) inoutclob ); \
+ \
+	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
+})
+
 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 				    enum x86_intercept intercept,
 				    enum x86_intercept_stage stage)
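The macro turns a faulting instruction into an error return instead of a kernel oops: the exception-table entry redirects a fault at label 1 to the fixup at label 3, which sets _fault and resumes at label 2. Its use later in this same diff is already a complete example; the following fragment is condensed from the flush_pending_x87_faults() hunk below (not additional code from the commit):

	ctxt->ops->get_fpu(ctxt);
	rc = asm_safe("fwait");	/* a pending #MF surfaces as rc != X86EMUL_CONTINUE */
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);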
@@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
  * depending on whether they're AVX encoded or not.
  *
  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
- * subject to the same check.
+ * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
+ * 512 bytes of data must be aligned to a 16 byte boundary.
  */
-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 {
 	if (likely(size < 16))
-		return false;
+		return 1;
 
 	if (ctxt->d & Aligned)
-		return true;
+		return size;
 	else if (ctxt->d & Unaligned)
-		return false;
+		return 1;
 	else if (ctxt->d & Avx)
-		return false;
+		return 1;
+	else if (ctxt->d & Aligned16)
+		return 16;
 	else
-		return true;
+		return size;
 }
 
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
-	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+	if (la & (insn_alignment(ctxt, size) - 1))
 		return emulate_gp(ctxt, 0);
 	return X86EMUL_CONTINUE;
 bad:
@@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
 				     enum x86_transfer_type transfer,
@@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 
-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
-	if ((seg == VCPU_SREG_CS
-	     || (seg == VCPU_SREG_SS
-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-	     || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
 	/* TR should be in GDT only */
 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
 		goto exception;
 
-	if (null_selector) /* for NULL selector skip all following checks */
+	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
+	if (null_selector) {
+		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+			goto exception;
+
+		if (seg == VCPU_SREG_SS) {
+			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+				goto exception;
+
+			/*
+			 * ctxt->ops->set_segment expects the CPL to be in
+			 * SS.DPL, so fake an expand-up 32-bit data segment.
+			 */
+			seg_desc.type = 3;
+			seg_desc.p = 1;
+			seg_desc.s = 1;
+			seg_desc.dpl = cpl;
+			seg_desc.d = 1;
+			seg_desc.g = 1;
+		}
+
+		/* Skip all following checks */
 		goto load;
+	}
 
 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
@@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/*
+	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+	 * they can load it at CPL<3 (Intel's manual says only LSS can,
+	 * but it's wrong).
+	 *
+	 * However, the Intel manual says that putting IST=1/DPL=3 in
+	 * an interrupt gate will result in SS=3 (the AMD manual instead
+	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
+	 * and only forbid it here.
+	 */
+	if (seg == VCPU_SREG_SS && selector == 3 &&
+	    ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
 					 X86_TRANSFER_NONE, NULL);
 }
@@ -3646,7 +3712,7 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
 			       &desc_ptr, 2 + ctxt->op_bytes);
 }
 
@@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int check_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+	u32 eax = 1, ebx, ecx = 0, edx;
+
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	if (!(edx & FFL(FXSR)))
+		return emulate_ud(ctxt);
+
+	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+		return emulate_nm(ctxt);
+
+	/*
+	 * Don't emulate a case that should never be hit, instead of working
+	 * around a lack of fxsave64/fxrstor64 on old compilers.
+	 */
+	if (ctxt->mode >= X86EMUL_MODE_PROT64)
+		return X86EMUL_UNHANDLEABLE;
+
+	return X86EMUL_CONTINUE;
+}
+
+/*
+ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
+ * 1) 16 bit mode
+ * 2) 32 bit mode
+ *    - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
+ *      preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
+ *      save and restore
+ * 3) 64-bit mode with REX.W prefix
+ *    - like (2), but XMM 8-15 are being saved and restored
+ * 4) 64-bit mode without REX.W prefix
+ *    - like (3), but FIP and FDP are 64 bit
+ *
+ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
+ * desired result.  (4) is not emulated.
+ *
+ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
+ * and FPU DS) should match.
+ */
+static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+{
+	struct fxregs_state fx_state;
+	size_t size;
+	int rc;
+
+	rc = check_fxsr(ctxt);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	ctxt->ops->get_fpu(ctxt);
+
+	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+
+	ctxt->ops->put_fpu(ctxt);
+
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
+		size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
+	else
+		size = offsetof(struct fxregs_state, xmm_space[0]);
+
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+}
+
+static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
+		struct fxregs_state *new)
+{
+	int rc = X86EMUL_CONTINUE;
+	struct fxregs_state old;
+
+	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	/*
+	 * 64 bit host will restore XMM 8-15, which is not correct on non-64
+	 * bit guests.  Load the current values in order to preserve 64 bit
+	 * XMMs after fxrstor.
+	 */
+#ifdef CONFIG_X86_64
+	/* XXX: accessing XMM 8-15 very awkwardly */
+	memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
+#endif
+
+	/*
+	 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
+	 * does save and restore MXCSR.
+	 */
+	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
+		memcpy(new->xmm_space, old.xmm_space, 8 * 16);
+
+	return rc;
+}
+
+static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+{
+	struct fxregs_state fx_state;
+	int rc;
+
+	rc = check_fxsr(ctxt);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	if (fx_state.mxcsr >> 16)
+		return emulate_gp(ctxt, 0);
+
+	ctxt->ops->get_fpu(ctxt);
+
+	if (ctxt->mode < X86EMUL_MODE_PROT64)
+		rc = fxrstor_fixup(ctxt, &fx_state);
+
+	if (rc == X86EMUL_CONTINUE)
+		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+
+	ctxt->ops->put_fpu(ctxt);
+
+	return rc;
+}
+
 static bool valid_cr(int nr)
 {
 	switch (nr) {
@@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = {
 };
 
 static const struct group_dual group15 = { {
-	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+	I(ModRM | Aligned16, em_fxsave),
+	I(ModRM | Aligned16, em_fxrstor),
+	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
 }, {
 	N, N, N, N, N, N, N, N,
 } };
@@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 
 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
-	bool fault = false;
+	int rc;
 
 	ctxt->ops->get_fpu(ctxt);
-	asm volatile("1: fwait \n\t"
-		     "2: \n\t"
-		     ".pushsection .fixup,\"ax\" \n\t"
-		     "3: \n\t"
-		     "movb $1, %[fault] \n\t"
-		     "jmp 2b \n\t"
-		     ".popsection \n\t"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [fault]"+qm"(fault));
+	rc = asm_safe("fwait");
 	ctxt->ops->put_fpu(ctxt);
 
-	if (unlikely(fault))
+	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
 
 	return X86EMUL_CONTINUE;
arch/x86/kvm/lapic.c

@@ -2187,3 +2187,9 @@ void kvm_lapic_init(void)
 	jump_label_rate_limit(&apic_hw_disabled, HZ);
 	jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+	static_key_deferred_flush(&apic_hw_disabled);
+	static_key_deferred_flush(&apic_sw_disabled);
+}
arch/x86/kvm/lapic.h

@@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
 {
arch/x86/kvm/x86.c

@@ -5842,6 +5842,7 @@ out:
 
 void kvm_arch_exit(void)
 {
+	kvm_lapic_exit();
 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
block/blk-mq.c

@@ -842,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int cpu = hctx->next_cpu, next_cpu;
+		int next_cpu;
 
 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
@@ -850,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
-		return cpu;
 	}
 
 	return hctx->next_cpu;
block/cfq-iosched.c

@@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
 {
 	struct cfq_group_data *cgd;
 
-	cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+	cgd = kzalloc(sizeof(*cgd), gfp);
 	if (!cgd)
 		return NULL;
 	return &cgd->cpd;
drivers/acpi/apei/ghes.c

@@ -847,6 +847,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 		if (ghes_read_estatus(ghes, 1)) {
 			ghes_clear_estatus(ghes);
 			continue;
+		} else {
+			ret = NMI_HANDLED;
 		}
 
 		sev = ghes_severity(ghes->estatus->error_severity);
@@ -858,11 +860,10 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 
 		__process_error(ghes);
 		ghes_clear_estatus(ghes);
-
-		ret = NMI_HANDLED;
 	}
 
 #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	if (ret == NMI_HANDLED)
 		irq_work_queue(&ghes_proc_irq_work);
 #endif
 	atomic_dec(&ghes_in_nmi);
drivers/bus/vexpress-config.c

@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
 {
 	struct device_node *bridge;
 	struct device *parent;
+	int ret;
 
 	bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
 	if (!bridge)
@@ -181,7 +182,11 @@ static int vexpress_config_populate(struct device_node *node)
 	if (WARN_ON(!parent))
 		return -ENODEV;
 
-	return of_platform_populate(node, NULL, NULL, parent);
+	ret = of_platform_populate(node, NULL, NULL, parent);
+
+	put_device(parent);
+
+	return ret;
 }
 
 static int __init vexpress_config_init(void)
drivers/cpufreq/powernv-cpufreq.c

@@ -373,8 +373,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 	if (unlikely(rebooting) && new_index != get_nominal_index())
 		return 0;
 
-	if (!throttled)
+	if (!throttled) {
+		/* we don't want to be preempted while
+		 * checking if the CPU frequency has been throttled
+		 */
+		preempt_disable();
 		powernv_cpufreq_throttle_check(NULL);
+		preempt_enable();
+	}
 
 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
drivers/gpu/drm/radeon/si_dpm.c

@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		    (rdev->pdev->device == 0x6817) ||
 		    (rdev->pdev->device == 0x6806))
 			max_mclk = 120000;
-	} else if (rdev->family == CHIP_VERDE) {
-		if ((rdev->pdev->revision == 0x81) ||
-		    (rdev->pdev->revision == 0x83) ||
-		    (rdev->pdev->revision == 0x87) ||
-		    (rdev->pdev->device == 0x6820) ||
-		    (rdev->pdev->device == 0x6821) ||
-		    (rdev->pdev->device == 0x6822) ||
-		    (rdev->pdev->device == 0x6823) ||
-		    (rdev->pdev->device == 0x682A) ||
-		    (rdev->pdev->device == 0x682B)) {
-			max_sclk = 75000;
-			max_mclk = 80000;
-		}
 	} else if (rdev->family == CHIP_OLAND) {
 		if ((rdev->pdev->revision == 0xC7) ||
 		    (rdev->pdev->revision == 0x80) ||
drivers/i2c/i2c-core.c

@@ -1400,7 +1400,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
 
 	if (i2c_check_addr_validity(addr, info.flags)) {
 		dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
-			info.addr, node->full_name);
+			addr, node->full_name);
 		return ERR_PTR(-EINVAL);
 	}
 
drivers/i2c/i2c-dev.c

@@ -329,7 +329,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
 		unsigned long arg)
 {
 	struct i2c_smbus_ioctl_data data_arg;
-	union i2c_smbus_data temp;
+	union i2c_smbus_data temp = {};
 	int datasize, res;
 
 	if (copy_from_user(&data_arg,
drivers/input/joystick/xpad.c

@@ -1238,6 +1238,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
 	input_dev->name = xpad->name;
 	input_dev->phys = xpad->phys;
 	usb_to_input_id(xpad->udev, &input_dev->id);
+
+	if (xpad->xtype == XTYPE_XBOX360W) {
+		/* x360w controllers and the receiver have different ids */
+		input_dev->id.product = 0x02a1;
+	}
+
 	input_dev->dev.parent = &xpad->intf->dev;
 
 	input_set_drvdata(input_dev, xpad);
drivers/input/serio/i8042-x86ia64io.h

@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
 		},
 	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+		},
+	},
 	{ }
 };
 
drivers/input/touchscreen/elants_i2c.c

@@ -905,9 +905,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
 
 		case QUEUE_HEADER_NORMAL:
 			report_count = ts->buf[FW_HDR_COUNT];
-			if (report_count > 3) {
+			if (report_count == 0 || report_count > 3) {
 				dev_err(&client->dev,
-					"too large report count: %*ph\n",
+					"bad report count: %*ph\n",
 					HEADER_SIZE, ts->buf);
 				break;
 			}
drivers/pinctrl/meson/pinctrl-meson.c

@@ -246,7 +246,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
 {
 	struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
 
-	meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+	meson_pmx_disable_other_groups(pc, offset, -1);
 
 	return 0;
 }
drivers/pinctrl/sh-pfc/pinctrl.c

@@ -483,7 +483,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
 
 	switch (param) {
 	case PIN_CONFIG_BIAS_DISABLE:
-		return true;
+		return pin->configs &
+			(SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
 
 	case PIN_CONFIG_BIAS_PULL_UP:
 		return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
drivers/tty/serial/atmel_serial.c

@@ -470,6 +470,14 @@ static void atmel_stop_tx(struct uart_port *port)
 		/* disable PDC transmit */
 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 	}
+
+	/*
+	 * Disable the transmitter.
+	 * This is mandatory when DMA is used, otherwise the DMA buffer
+	 * is fully transmitted.
+	 */
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
 	/* Disable interrupts */
 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
@@ -502,6 +510,9 @@ static void atmel_start_tx(struct uart_port *port)
 
 	/* Enable interrupts */
 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+	/* re-enable the transmitter */
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
 }
 
 /*
drivers/tty/sysrq.c

@@ -939,8 +939,8 @@ static const struct input_device_id sysrq_ids[] = {
 	{
 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
 				INPUT_DEVICE_ID_MATCH_KEYBIT,
-		.evbit = { BIT_MASK(EV_KEY) },
-		.keybit = { BIT_MASK(KEY_LEFTALT) },
+		.evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+		.keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
 	},
 	{ },
 };
drivers/usb/host/xhci-ring.c

@@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 	spin_lock_irqsave(&xhci->lock, flags);
 
 	ep->stop_cmds_pending--;
-	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return;
-	}
-	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-				"Stop EP timer ran, but another timer marked "
-				"xHCI as DYING, exiting.");
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		return;
-	}
 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 				"Stop EP timer ran, but no command pending, "
drivers/usb/host/xhci.c

@@ -1569,19 +1569,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		xhci_urb_free_priv(urb_priv);
 		return ret;
 	}
-	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
-			(xhci->xhc_state & XHCI_STATE_HALTED)) {
-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-				"Ep 0x%x: URB %p to be canceled on "
-				"non-responsive xHCI host.",
-				urb->ep->desc.bEndpointAddress, urb);
-		/* Let the stop endpoint command watchdog timer (which set this
-		 * state) finish cleaning up the endpoint TD lists.  We must
-		 * have caught it in the middle of dropping a lock and giving
-		 * back an URB.
-		 */
-		goto done;
-	}
 
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
drivers/usb/serial/ch341.c

@@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
 	r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			    value, index, NULL, 0, DEFAULT_TIMEOUT);
+	if (r < 0)
+		dev_err(&dev->dev, "failed to send control message: %d\n", r);
 
 	return r;
 }
@@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev,
 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
+	if (r < bufsize) {
+		if (r >= 0) {
+			dev_err(&dev->dev,
+				"short control message received (%d < %u)\n",
+				r, bufsize);
+			r = -EIO;
+		}
+
+		dev_err(&dev->dev, "failed to receive control message: %d\n",
+			r);
 		return r;
+	}
+
+	return 0;
 }
 
 static int ch341_set_baudrate(struct usb_device *dev,
@@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
 
 static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 {
+	const unsigned int size = 2;
 	char *buffer;
 	int r;
-	const unsigned size = 8;
 	unsigned long flags;
 
 	buffer = kmalloc(size, GFP_KERNEL);
@@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 	if (r < 0)
 		goto out;
 
-	/* setup the private status if available */
-	if (r == 2) {
-		r = 0;
 	spin_lock_irqsave(&priv->lock, flags);
 	priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
 	spin_unlock_irqrestore(&priv->lock, flags);
-	} else
-		r = -EPROTO;
 
 out:	kfree(buffer);
 	return r;
@@ -188,9 +198,9 @@ out:	kfree(buffer);
 
 static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 {
+	const unsigned int size = 2;
 	char *buffer;
 	int r;
-	const unsigned size = 8;
 
 	buffer = kmalloc(size, GFP_KERNEL);
 	if (!buffer)
@@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
 
 	spin_lock_init(&priv->lock);
 	priv->baud_rate = DEFAULT_BAUD_RATE;
-	priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
 
 	r = ch341_configure(port->serial->dev, priv);
 	if (r < 0)
@@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
 
 	r = ch341_configure(serial->dev, priv);
 	if (r)
-		goto out;
+		return r;
 
 	if (tty)
 		ch341_set_termios(tty, port, NULL);
@@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
 	if (r) {
 		dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
 			__func__, r);
-		goto out;
+		return r;
 	}
 
 	r = usb_serial_generic_open(tty, port);
+	if (r)
+		goto err_kill_interrupt_urb;
 
-out:	return r;
+	return 0;
+
+err_kill_interrupt_urb:
+	usb_kill_urb(port->interrupt_in_urb);
+
+	return r;
 }
 
 /* Old_termios contains the original termios settings and
@@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty,
 
 	baud_rate = tty_get_baud_rate(tty);
 
-	priv->baud_rate = baud_rate;
-
 	if (baud_rate) {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		priv->baud_rate = baud_rate;
 		ch341_set_baudrate(port->serial->dev, priv);
-	} else {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
-		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	ch341_set_handshake(port->serial->dev, priv->line_control);
-
 	/* Unimplemented:
 	 * (cflag & CSIZE) : data bits [5, 8]
 	 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
 	 * (cflag & CSTOPB) : stop bits [1, 2]
 	 */
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (C_BAUD(tty) == B0)
+		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	ch341_set_handshake(port->serial->dev, priv->line_control);
 }
 
 static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -539,14 +554,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
 
 static int ch341_reset_resume(struct usb_serial *serial)
 {
-	struct ch341_private *priv;
-
-	priv = usb_get_serial_port_data(serial->port[0]);
+	struct usb_serial_port *port = serial->port[0];
+	struct ch341_private *priv = usb_get_serial_port_data(port);
+	int ret;
 
 	/* reconfigure ch341 serial port after bus-reset */
 	ch341_configure(serial->dev, priv);
 
-	return 0;
+	if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
+		ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+		if (ret) {
+			dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return usb_serial_generic_resume(serial);
 }
 
 static struct usb_serial_driver ch341_device = {
drivers/usb/serial/kl5kusb105.c

@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
 			     status_buf, KLSI_STATUSBUF_LEN,
 			     10000
 			     );
-	if (rc < 0)
-		dev_err(&port->dev, "Reading line status failed (error = %d)\n",
-			rc);
-	else {
+	if (rc != KLSI_STATUSBUF_LEN) {
+		dev_err(&port->dev, "reading line status failed: %d\n", rc);
+		if (rc >= 0)
+			rc = -EIO;
+	} else {
 		status = get_unaligned_le16(status_buf);
 
 		dev_info(&port->serial->dev->dev, "read status %x %x\n",
drivers/vme/bridges/vme_ca91cx42.c

@@ -467,7 +467,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 
-	*pci_base = (dma_addr_t)vme_base + pci_offset;
+	*pci_base = (dma_addr_t)*vme_base + pci_offset;
 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 
 	*enabled = 0;
fs/btrfs/extent-tree.c

@@ -2520,11 +2520,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			spin_unlock(&locked_ref->lock);
-			btrfs_delayed_ref_unlock(locked_ref);
 			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			cond_resched();
 			count++;
@@ -2570,7 +2570,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			 */
 			if (must_insert_reserved)
 				locked_ref->must_insert_reserved = 1;
+			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
+			delayed_refs->num_heads_ready++;
+			spin_unlock(&delayed_refs->lock);
 			btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
 			btrfs_delayed_ref_unlock(locked_ref);
 			return ret;
fs/dcache.c

@@ -1322,9 +1322,12 @@ int d_set_mounted(struct dentry *dentry)
 	}
 	spin_lock(&dentry->d_lock);
 	if (!d_unlinked(dentry)) {
-		dentry->d_flags |= DCACHE_MOUNTED;
-		ret = 0;
+		ret = -EBUSY;
+		if (!d_mountpoint(dentry)) {
+			dentry->d_flags |= DCACHE_MOUNTED;
+			ret = 0;
+		}
 	}
 	spin_unlock(&dentry->d_lock);
 out:
 	write_sequnlock(&rename_lock);
fs/namespace.c

@@ -743,26 +743,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
 	return NULL;
 }
 
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
 {
-	struct hlist_head *chain = mp_hash(dentry);
-	struct mountpoint *mp;
+	struct mountpoint *mp, *new = NULL;
 	int ret;
 
-	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
-	if (!mp)
-		return ERR_PTR(-ENOMEM);
+	if (d_mountpoint(dentry)) {
+mountpoint:
+		read_seqlock_excl(&mount_lock);
+		mp = lookup_mountpoint(dentry);
+		read_sequnlock_excl(&mount_lock);
+		if (mp)
+			goto done;
+	}
 
-	ret = d_set_mounted(dentry);
-	if (ret) {
-		kfree(mp);
-		return ERR_PTR(ret);
-	}
+	if (!new)
+		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
 
-	mp->m_dentry = dentry;
-	mp->m_count = 1;
-	hlist_add_head(&mp->m_hash, chain);
-	INIT_HLIST_HEAD(&mp->m_list);
+	/* Exactly one processes may set d_mounted */
+	ret = d_set_mounted(dentry);
+
+	/* Someone else set d_mounted? */
+	if (ret == -EBUSY)
+		goto mountpoint;
+
+	/* The dentry is not available as a mountpoint? */
+	mp = ERR_PTR(ret);
+	if (ret)
+		goto done;
+
+	/* Add the new mountpoint to the hash table */
+	read_seqlock_excl(&mount_lock);
+	new->m_dentry = dentry;
+	new->m_count = 1;
+	hlist_add_head(&new->m_hash, mp_hash(dentry));
+	INIT_HLIST_HEAD(&new->m_list);
+	read_sequnlock_excl(&mount_lock);
+
+	mp = new;
+	new = NULL;
+done:
+	kfree(new);
 	return mp;
 }
 
@@ -1557,11 +1581,11 @@ void __detach_mounts(struct dentry *dentry)
 	struct mount *mnt;
 
 	namespace_lock();
+	lock_mount_hash();
 	mp = lookup_mountpoint(dentry);
 	if (IS_ERR_OR_NULL(mp))
 		goto out_unlock;
 
-	lock_mount_hash();
 	event++;
 	while (!hlist_empty(&mp->m_list)) {
 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1571,9 +1595,9 @@ void __detach_mounts(struct dentry *dentry)
 		}
 		else umount_tree(mnt, UMOUNT_CONNECTED);
 	}
-	unlock_mount_hash();
 	put_mountpoint(mp);
 out_unlock:
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -1962,9 +1986,7 @@ retry:
 	namespace_lock();
 	mnt = lookup_mnt(path);
 	if (likely(!mnt)) {
-		struct mountpoint *mp = lookup_mountpoint(dentry);
-		if (!mp)
-			mp = new_mountpoint(dentry);
+		struct mountpoint *mp = get_mountpoint(dentry);
 		if (IS_ERR(mp)) {
 			namespace_unlock();
 			mutex_unlock(&dentry->d_inode->i_mutex);
@@ -1983,7 +2005,11 @@ retry:
 static void unlock_mount(struct mountpoint *where)
 {
 	struct dentry *dentry = where->m_dentry;
+
+	read_seqlock_excl(&mount_lock);
 	put_mountpoint(where);
+	read_sequnlock_excl(&mount_lock);
+
 	namespace_unlock();
 	mutex_unlock(&dentry->d_inode->i_mutex);
 }
@@ -3055,9 +3081,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
 	/* A moved mount should not expire automatically */
 	list_del_init(&new_mnt->mnt_expire);
+	put_mountpoint(root_mp);
 	unlock_mount_hash();
 	chroot_fs_refs(&root, &new);
-	put_mountpoint(root_mp);
 	error = 0;
 out4:
 	unlock_mount(old_mp);
fs/nfs/dir.c (15 changes)
@@ -462,7 +462,7 @@ void nfs_force_use_readdirplus(struct inode *dir)
 {
 	if (!list_empty(&NFS_I(dir)->open_files)) {
 		nfs_advise_use_readdirplus(dir);
-		nfs_zap_mapping(dir, dir->i_mapping);
+		invalidate_mapping_pages(dir->i_mapping, 0, -1);
 	}
 }
 
@@ -847,17 +847,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
 	goto out;
 }
 
-static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
-{
-	struct nfs_inode *nfsi = NFS_I(dir);
-
-	if (nfs_attribute_cache_expired(dir))
-		return true;
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		return true;
-	return false;
-}
-
 /* The file offset position represents the dirent entry number.  A
    last cookie cache takes care of the common case of reading the
    whole directory.
@@ -890,7 +879,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
 	desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
 
 	nfs_block_sillyrename(dentry);
-	if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
+	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
 		res = nfs_revalidate_mapping(inode, file->f_mapping);
 	if (res < 0)
 		goto out;
fs/nfs/filelayout/filelayoutdev.c

@@ -283,7 +283,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
 			     s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
 
 out_test_devid:
-	if (filelayout_test_devid_unavailable(devid))
+	if (ret->ds_clp == NULL ||
+	    filelayout_test_devid_unavailable(devid))
 		ret = NULL;
 out:
 	return ret;
fs/nfs/pnfs.c

@@ -1185,13 +1185,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
 	 * i_lock */
 	spin_lock(&ino->i_lock);
 	lo = nfsi->layout;
-	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-		sleep = true;
-	spin_unlock(&ino->i_lock);
-
-	if (sleep)
+	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
 		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-
+		sleep = true;
+	}
+	spin_unlock(&ino->i_lock);
 	return sleep;
 }
 
fs/ocfs2/dlmglue.c

@@ -3321,6 +3321,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
 	     lockres->l_level, new_level);
 
+	/*
+	 * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
+	 * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
+	 * we can recover correctly from node failure. Otherwise, we may get
+	 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
+	 */
+	if (!ocfs2_is_o2cb_active() &&
+	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+		lvb = 1;
+
 	if (lvb)
 		dlm_flags |= DLM_LKF_VALBLK;
 
fs/ocfs2/stackglue.c

@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
  */
 static struct ocfs2_stack_plugin *active_stack;
 
+inline int ocfs2_is_o2cb_active(void)
+{
+	return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
 {
 	struct ocfs2_stack_plugin *p;
fs/ocfs2/stackglue.h

@@ -298,4 +298,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
 
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
 #endif  /* STACKGLUE_H */
fs/proc/proc_sysctl.c

@@ -703,7 +703,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
 	ctl_dir = container_of(head, struct ctl_dir, header);
 
 	if (!dir_emit_dots(file, ctx))
-		return 0;
+		goto out;
 
 	pos = 2;
 
@@ -713,6 +713,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
 			break;
 		}
 	}
+out:
 	sysctl_head_finish(head);
 	return 0;
 }
include/linux/jump_label_ratelimit.h

@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 	STATIC_KEY_CHECK_USE();
 	static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+}
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
kernel/jump_label.c

@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+	flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
 void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
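Taken together with the lapic.c and x86.c hunks earlier in this diff, the new helper closes a module-unload race: deferred static-key work queued by KVM must be flushed before the module text goes away. The resulting call chain, condensed from those hunks (not additional code from the commit):

	void kvm_lapic_exit(void)
	{
		static_key_deferred_flush(&apic_hw_disabled);	/* flush_delayed_work() */
		static_key_deferred_flush(&apic_sw_disabled);
	}

	void kvm_arch_exit(void)
	{
		kvm_lapic_exit();
		/* ... existing teardown continues ... */
	}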
kernel/memremap.c
@@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res)
        struct page_map *page_map = res;
 
        /* pages are dead and unused, undo the arch mapping */
+       mem_hotplug_begin();
        arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+       mem_hotplug_done();
 }
 
 void *devm_memremap_pages(struct device *dev, struct resource *res)
@@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
        if (nid < 0)
                nid = numa_mem_id();
 
+       mem_hotplug_begin();
        error = arch_add_memory(nid, res->start, resource_size(res), true);
+       mem_hotplug_done();
        if (error) {
                devres_free(page_map);
                return ERR_PTR(error);
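Note: arch_add_memory()/arch_remove_memory() were being called without the memory-hotplug lock, racing with other hotplug operations; the fix brackets both calls with mem_hotplug_begin()/mem_hotplug_done(). A sketch of that bracketing idiom, with a pthread mutex standing in for the kernel's hotplug lock (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static void hotplug_begin(void) { pthread_mutex_lock(&hotplug_lock); }
static void hotplug_done(void)  { pthread_mutex_unlock(&hotplug_lock); }

/* Stand-in for arch_add_memory(): mutates shared mapping state, so it
 * must only ever run between the lock helpers. */
static int add_memory_region(long start, long size)
{
        printf("mapping [%ld, %ld)\n", start, start + size);
        return 0;
}

int main(void)
{
        int error;

        hotplug_begin();
        error = add_memory_region(0x1000, 0x1000);
        hotplug_done();

        return error;
}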
37 mm/hugetlb.c
@@ -1723,23 +1723,32 @@ free:
 }
 
 /*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ *    in unused_resv_pages.  This corresponds to the prior adjustments made
+ *    to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ *    the reservation.  As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held.  However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing.  Do this by ensuring resv_huge_page always is greater than the
+ * number of huge pages we plan to free when dropping the lock.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
        unsigned long nr_pages;
 
-       /* Uncommit the reservation */
-       h->resv_huge_pages -= unused_resv_pages;
-
        /* Cannot return gigantic pages currently */
        if (hstate_is_gigantic(h))
-               return;
+               goto out;
 
+       /*
+        * Part (or even all) of the reservation could have been backed
+        * by pre-allocated pages. Only free surplus pages.
+        */
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
        /*
@@ -1749,12 +1758,22 @@ static void return_unused_surplus_pages(struct hstate *h,
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
+        *
+        * Note that we decrement resv_huge_pages as we free the pages.  If
+        * we drop the lock, resv_huge_pages will still be sufficiently large
+        * to cover subsequent pages we may free.
         */
        while (nr_pages--) {
+               h->resv_huge_pages--;
+               unused_resv_pages--;
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
-                       break;
+                       goto out;
                cond_resched_lock(&hugetlb_lock);
        }
 
+out:
+       /* Fully uncommit the reservation */
+       h->resv_huge_pages -= unused_resv_pages;
 }
 
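Note: the race fixed here is that cond_resched_lock() can drop hugetlb_lock mid-loop, and the old code had already subtracted the whole reservation up front, so another thread could claim pages the loop was still about to free. The fix decrements resv_huge_pages one page at a time as each page is actually freed, and only uncommits the remainder at the end. A small model of that accounting order (numbers are made up):

#include <stdio.h>

int main(void)
{
        unsigned long resv = 10;        /* h->resv_huge_pages */
        unsigned long unused = 6;       /* unused_resv_pages */
        unsigned long surplus = 4;      /* h->surplus_huge_pages */
        unsigned long nr = unused < surplus ? unused : surplus;

        while (nr--) {
                resv--;         /* uncommit this one page... */
                unused--;       /* ...as it is actually freed */
                printf("freed a surplus page, resv now %lu\n", resv);
                /* a lock drop here is safe: resv still covers the rest */
        }

        resv -= unused;         /* fully uncommit what is left */
        printf("final resv: %lu\n", resv);
        return 0;
}

The net effect (resv ends at 4) matches the old up-front subtraction; only the intermediate states change, and they are what concurrent claimants can observe.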
net/wireless/nl80211.c
@@ -13168,13 +13168,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                bool schedule_destroy_work = false;
-               bool schedule_scan_stop = false;
                struct cfg80211_sched_scan_request *sched_scan_req =
                        rcu_dereference(rdev->sched_scan_req);
 
                if (sched_scan_req && notify->portid &&
-                   sched_scan_req->owner_nlportid == notify->portid)
-                       schedule_scan_stop = true;
+                   sched_scan_req->owner_nlportid == notify->portid) {
+                       sched_scan_req->owner_nlportid = 0;
+
+                       if (rdev->ops->sched_scan_stop &&
+                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+                               schedule_work(&rdev->sched_scan_stop_wk);
+               }
 
                list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -13205,12 +13209,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
                                spin_unlock(&rdev->destroy_list_lock);
                                schedule_work(&rdev->destroy_work);
                        }
-               } else if (schedule_scan_stop) {
-                       sched_scan_req->owner_nlportid = 0;
-
-                       if (rdev->ops->sched_scan_stop &&
-                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
-                               schedule_work(&rdev->sched_scan_stop_wk);
-               }
        }
 
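Note: the old code only recorded schedule_scan_stop inside the RCU-protected lookup and touched sched_scan_req again much later, after the pointer could have been freed or replaced; the fix does the owner clearing and work scheduling immediately, while the rcu_dereference()'d pointer is known valid. A sketch of the structural shape of that fix, illustrative only (the kernel's validity window is an RCU read-side section, modelled here just as the scope of a function call):

#include <stdio.h>
#include <stdlib.h>

struct request { int owner; };

static void handle(struct request *req, int port)
{
        if (req && req->owner == port) {
                /* fix: act now, while req is known valid */
                req->owner = 0;
                puts("stop work scheduled");
        }
        /* old shape: set a flag here, then dereference req again after
         * this scope -- a use-after-free window once req can be freed */
}

int main(void)
{
        struct request *req = malloc(sizeof(*req));

        req->owner = 42;
        handle(req, 42);
        free(req);
        return 0;
}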
tools/testing/selftests/Makefile
@@ -83,7 +83,7 @@ ifdef INSTALL_PATH
        done;
 
        @# Ask all targets to emit their test scripts
-       echo "#!/bin/bash" > $(ALL_SCRIPT)
+       echo "#!/bin/sh" > $(ALL_SCRIPT)
        echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
        echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
 
tools/testing/selftests/net/run_netsocktests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 echo "--------------------"
 echo "running socket test"
virt/lib/irqbypass.c
@@ -188,7 +188,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token == consumer->token) {
+               if (tmp->token == consumer->token || tmp == consumer) {
                        mutex_unlock(&lock);
                        module_put(THIS_MODULE);
                        return -EBUSY;
@@ -235,7 +235,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token != consumer->token)
+               if (tmp != consumer)
                        continue;
 
                list_for_each_entry(producer, &producers, node) {
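Note: registration now rejects both a token collision and re-registration of the very same consumer object, and unregistration matches by object identity rather than by token, so it can never detach a different consumer that happens to share a token. A simplified userspace sketch of those two checks on a singly linked list (no locking, names invented):

#include <stdio.h>

struct consumer {
        int token;
        struct consumer *next;
};

static struct consumer *consumers;

static int register_consumer(struct consumer *c)
{
        /* reject a token collision OR the identical object */
        for (struct consumer *tmp = consumers; tmp; tmp = tmp->next)
                if (tmp->token == c->token || tmp == c)
                        return -1;      /* -EBUSY in the kernel */
        c->next = consumers;
        consumers = c;
        return 0;
}

static void unregister_consumer(struct consumer *c)
{
        /* match by identity, not by token */
        for (struct consumer **pp = &consumers; *pp; pp = &(*pp)->next)
                if (*pp == c) {
                        *pp = c->next;
                        return;
                }
}

int main(void)
{
        struct consumer a = { .token = 1 }, b = { .token = 1 };

        printf("register a: %d\n", register_consumer(&a));             /* 0 */
        printf("register a again: %d\n", register_consumer(&a));       /* -1 */
        printf("register b (same token): %d\n", register_consumer(&b));/* -1 */
        unregister_consumer(&a);
        printf("register b now: %d\n", register_consumer(&b));         /* 0 */
        return 0;
}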