Merge "Merge android-4.4.135 (c9d74f2
) into msm-4.4"
This commit is contained in:
commit
14efecefa7
289 changed files with 1873 additions and 633 deletions
|
@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
 free space on the data device drops below this level then a dm event
 will be triggered which a userspace daemon should catch allowing it to
 extend the pool device. Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just resumed device's free space is below
+the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.
 
 A low water mark for the metadata device is maintained in the kernel and
 will trigger a dm event if free space on the metadata device drops below

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 133
+SUBLEVEL = 135
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -11,6 +11,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -18,6 +22,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
	unsigned long ret, tmp, addr64;
 
+	smp_mb();
	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	insbl	%1,%4,%1\n"
@@ -42,6 +47,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
	unsigned long ret, tmp, addr64;
 
+	smp_mb();
	__asm__ __volatile__(
	"	andnot	%4,7,%3\n"
	"	inswl	%1,%4,%1\n"
@@ -66,6 +72,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
	unsigned long dummy;
 
+	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	bis $31,%3,%1\n"
@@ -86,6 +93,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
	unsigned long dummy;
 
+	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%4\n"
	"	bis $31,%3,%1\n"
@@ -127,10 +135,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -138,6 +148,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	insbl	%1,%5,%1\n"
@@ -149,8 +160,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
-	__ASM__MB
	"2:\n"
+	__ASM__MB
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
@@ -165,6 +176,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"
	"	inswl	%1,%5,%1\n"
@@ -176,8 +188,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"
-	__ASM__MB
	"2:\n"
+	__ASM__MB
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
@@ -192,6 +204,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
	unsigned long prev, cmp;
 
+	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
@@ -199,8 +212,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
	"	mov %4,%1\n"
	"	stl_c %1,%2\n"
	"	beq %1,3f\n"
-	__ASM__MB
	"2:\n"
+	__ASM__MB
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
@@ -215,6 +228,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
	unsigned long prev, cmp;
 
+	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%5\n"
	"	cmpeq %0,%3,%1\n"
@@ -222,8 +236,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
	"	mov %4,%1\n"
	"	stq_c %1,%2\n"
	"	beq %1,3f\n"
-	__ASM__MB
	"2:\n"
+	__ASM__MB
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
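These Alpha hunks add a leading smp_mb() to every xchg/cmpxchg variant and move the trailing barrier after the "2:" label so it executes even when the store-conditional path is taken. A rough C11 sketch of the resulting ordering contract (illustrative only, not the kernel's ll/sc assembly):

	#include <stdatomic.h>

	/* Both fences are unconditional, so even a *failed* compare-exchange
	 * is fully ordered and can safely head a dependency chain -- the
	 * case the new comment block calls out. */
	static unsigned long sketch_cmpxchg(_Atomic unsigned long *m,
	                                    unsigned long old, unsigned long new)
	{
	        unsigned long prev = old;

	        atomic_thread_fence(memory_order_seq_cst);   /* leading smp_mb() */
	        atomic_compare_exchange_strong_explicit(m, &prev, new,
	                                                memory_order_relaxed,
	                                                memory_order_relaxed);
	        atomic_thread_fence(memory_order_seq_cst);   /* trailing smp_mb(),
	                                                        never skipped */
	        return prev;    /* previous value; equals old on success */
	}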
@@ -479,7 +479,6 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
	bool "Emulate unaligned memory access (userspace only)"
-	default N
	select SYSCTL_ARCH_UNALIGN_NO_WARN
	select SYSCTL_ARCH_UNALIGN_ALLOW
	depends on ISA_ARCOMPACT
@@ -738,7 +738,7 @@
	timer@fffec600 {
		compatible = "arm,cortex-a9-twd-timer";
		reg = <0xfffec600 0x100>;
-		interrupts = <1 13 0xf04>;
+		interrupts = <1 13 0xf01>;
		clocks = <&mpu_periph_clk>;
	};
 
@@ -11,8 +11,6 @@ struct mm_struct;
 
 void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
 
-extern char vdso_start, vdso_end;
-
 extern unsigned int vdso_total_pages;
 
 #else /* CONFIG_VDSO */
@@ -39,6 +39,8 @@
 
 static struct page **vdso_text_pagelist;
 
+extern char vdso_start[], vdso_end[];
+
 /* Total number of pages needed for the data and text portions of the VDSO. */
 unsigned int vdso_total_pages __ro_after_init;
 
@@ -179,13 +181,13 @@ static int __init vdso_init(void)
	unsigned int text_pages;
	int i;
 
-	if (memcmp(&vdso_start, "\177ELF", 4)) {
+	if (memcmp(vdso_start, "\177ELF", 4)) {
		pr_err("VDSO is not a valid ELF object!\n");
		return -ENOEXEC;
	}
 
-	text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-	pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	pr_debug("vdso: %i text pages at base %pK\n", text_pages, vdso_start);
 
	/* Allocate the VDSO text pagelist */
	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -200,7 +202,7 @@ static int __init vdso_init(void)
	for (i = 0; i < text_pages; i++) {
		struct page *page;
 
-		page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		page = virt_to_page(vdso_start + i * PAGE_SIZE);
		vdso_text_pagelist[i] = page;
	}
 
@@ -211,7 +213,7 @@ static int __init vdso_init(void)
 
	cntvct_ok = cntvct_functional();
 
-	patch_vdso(&vdso_start);
+	patch_vdso(vdso_start);
 
	return 0;
 }
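The vdso.c hunks follow from the vdso.h change above: once vdso_start and vdso_end are declared as arrays instead of single chars, the array names decay to pointers, so the & operators (and the undefined pointer arithmetic between two unrelated one-byte objects) go away. A minimal sketch of the idiom, assuming the symbols come from the linker script:

	extern char sketch_start[], sketch_end[];    /* linker-provided bounds */

	static unsigned int sketch_pages(unsigned int page_shift)
	{
	        /* no '&' needed: the symbol's address is the data itself */
	        return (unsigned int)(sketch_end - sketch_start) >> page_shift;
	}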
@@ -1031,17 +1031,17 @@ static int clk_debugfs_register_one(struct clk *c)
		return -ENOMEM;
	c->dent = d;
 
-	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
-	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+	d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
-	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+	d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
@@ -231,7 +231,7 @@ static void omap_pm_end(void)
	cpu_idle_poll_ctrl(false);
 }
 
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
 {
	if (cpu_is_omap34xx())
		omap_prcm_irq_complete();
@@ -241,7 +241,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap_pm_begin,
	.end		= omap_pm_end,
	.enter		= omap_pm_enter,
-	.finish		= omap_pm_finish,
+	.wake		= omap_pm_wake,
	.valid		= suspend_valid_only_mem,
 };
 
@@ -136,12 +136,6 @@ static struct clock_event_device clockevent_gpt = {
	.tick_resume		= omap2_gp_timer_shutdown,
 };
 
-static struct property device_disabled = {
-	.name = "status",
-	.length = sizeof("disabled"),
-	.value = "disabled",
-};
-
 static const struct of_device_id omap_timer_match[] __initconst = {
	{ .compatible = "ti,omap2420-timer", },
	{ .compatible = "ti,omap3430-timer", },
@@ -183,8 +177,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
		    of_get_property(np, "ti,timer-secure", NULL)))
			continue;
 
-		if (!of_device_is_compatible(np, "ti,omap-counter32k"))
-			of_add_property(np, &device_disabled);
+		if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+			struct property *prop;
+
+			prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+			if (!prop)
+				return NULL;
+			prop->name = "status";
+			prop->value = "disabled";
+			prop->length = strlen(prop->value);
+			of_add_property(np, prop);
+		}
		return np;
	}
 
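The static device_disabled property is dropped because of_add_property() links a property into a device node through the property object itself, so a single static instance can belong to at most one node; attaching it to every disabled timer corrupts their property lists. A sketch of why the linkage forces a per-node allocation (field set abbreviated, not the kernel's struct property):

	struct sketch_property {
	        char *name;
	        int length;
	        void *value;
	        struct sketch_property *next;  /* per-node list linkage lives here */
	};
	/* Linking one static object into two nodes makes both lists share the
	 * same 'next' pointer, hence the kzalloc() per matched node above. */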
@@ -854,11 +854,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
	timer->irq = irq->start;
	timer->pdev = pdev;
 
-	/* Skip pm_runtime_enable for OMAP1 */
-	if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
-		pm_runtime_enable(dev);
-		pm_runtime_irq_safe(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_irq_safe(dev);
 
	if (!timer->reserved) {
		ret = pm_runtime_get_sync(dev);
@@ -116,8 +116,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
-	"	and	%w1, %w1, #0xffff\n"
-	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"	sub	%w1, %w1, %3\n"
+	"	eor	%w1, %w1, %w0\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
	.id			= 0,
	.num_resources		= ARRAY_SIZE(mcf_fec0_resources),
	.resource		= mcf_fec0_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec0.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 
 #ifdef MCFFEC_BASE1
@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
	.id			= 1,
	.num_resources		= ARRAY_SIZE(mcf_fec1_resources),
	.resource		= mcf_fec1_resources,
-	.dev.platform_data	= FEC_PDATA,
+	.dev = {
+		.dma_mask		= &mcf_fec1.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= FEC_PDATA,
+	}
 };
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */
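The ColdFire hunks exist because a platform device that performs DMA must carry a dev.dma_mask and dev.coherent_dma_mask, or the DMA mapping API refuses to serve it; pointing dma_mask at the device's own coherent_dma_mask is the usual idiom for statically defined platform devices. A generic sketch (kernel headers assumed, device name hypothetical):

	static struct platform_device sketch_dev = {
	        .name = "sketch-fec",
	        .id   = 0,
	        .dev  = {
	                /* legal C: the object's own address is a link-time constant */
	                .dma_mask          = &sketch_dev.dev.coherent_dma_mask,
	                .coherent_dma_mask = DMA_BIT_MASK(32),
	        },
	};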
@@ -2240,7 +2240,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 
	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
-		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
			ciu_node->name);
		return -EINVAL;
	}
@@ -2252,7 +2252,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 
	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
@@ -2260,7 +2260,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 
	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
-		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
@@ -2268,7 +2268,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 
	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
-		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
			ciu_node->name);
		return r;
	}
@@ -2278,7 +2278,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
-		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
		return -ENOMEM;
	}
 
@@ -167,7 +167,7 @@
 #define AR71XX_AHB_DIV_MASK		0x7
 
 #define AR724X_PLL_REG_CPU_CONFIG	0x00
-#define AR724X_PLL_REG_PCIE_CONFIG	0x18
+#define AR724X_PLL_REG_PCIE_CONFIG	0x10
 
 #define AR724X_PLL_FB_SHIFT		0
 #define AR724X_PLL_FB_MASK		0x3ff
@@ -482,7 +482,7 @@ static int fpr_get_msa(struct task_struct *target,
 /*
  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR and FIR registers separately.
  */
 static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
@@ -490,6 +490,7 @@ static int fpr_get(struct task_struct *target,
		   void *kbuf, void __user *ubuf)
 {
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
	int err;
 
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -502,6 +503,12 @@ static int fpr_get(struct task_struct *target,
	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcr31,
				  fcr31_pos, fcr31_pos + sizeof(u32));
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &boot_cpu_data.fpu_id,
+				  fir_pos, fir_pos + sizeof(u32));
 
	return err;
 }
@@ -550,7 +557,8 @@ static int fpr_set_msa(struct task_struct *target,
 /*
  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  * Choose the appropriate helper for general registers, and then copy
- * the FCSR register separately.
+ * the FCSR register separately. Ignore the incoming FIR register
+ * contents though, as the register is read-only.
  *
  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  * which is supposed to have been guaranteed by the kernel before
@@ -564,6 +572,7 @@ static int fpr_set(struct task_struct *target,
		   const void *kbuf, const void __user *ubuf)
 {
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;
 
@@ -591,6 +600,11 @@ static int fpr_set(struct task_struct *target,
		ptrace_setfcr31(target, fcr31);
	}
 
+	if (count > 0)
+		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+						fir_pos,
+						fir_pos + sizeof(u32));
+
	return err;
 }
 
@@ -815,7 +829,7 @@ long arch_ptrace(struct task_struct *child, long request,
			fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
@@ -904,7 +918,7 @@ long arch_ptrace(struct task_struct *child, long request,
 
			init_fp_ctx(child);
 #ifdef CONFIG_32BIT
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
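For reference, the fcr31_pos/fir_pos arithmetic in the hunks above indexes into this NT_PRFPREG image: 32 eight-byte FP register slots, then the 32-bit FCSR, then the 32-bit FIR, which is read-only (served from boot_cpu_data.fpu_id on reads and ignored on writes). A sketch of the layout, assuming an 8-byte elf_fpreg_t:

	struct sketch_prfpreg {                 /* not a kernel struct */
	        unsigned long long fpr[32];     /* NUM_FPU_REGS * sizeof(elf_fpreg_t) */
	        unsigned int fcr31;             /* fcr31_pos = 32 * 8 = 256 */
	        unsigned int fir;               /* fir_pos = fcr31_pos + 4 = 260 */
	};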
@@ -97,7 +97,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
				break;
			}
			fregs = get_fpu_regs(child);
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
@@ -203,7 +203,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
@@ -40,7 +40,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
@@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
 
 #define RBTX4939_MAX_7SEGLEDS	8
 
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
 static u8 led_val[RBTX4939_MAX_7SEGLEDS];
 struct rbtx4939_led_data {
	struct led_classdev cdev;
@@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
 
 static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
 {
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS)
	unsigned long flags;
	local_irq_save(flags);
	/* bit7: reserved for LED class */
@@ -5,5 +5,6 @@ static inline bool arch_irq_work_has_interrupt(void)
 {
	return true;
 }
+extern void arch_irq_work_raise(void);
 
 #endif /* _ASM_POWERPC_IRQ_WORK_H */
@@ -3002,15 +3002,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
		goto up_out;
 
	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
	up_read(&current->mm->mmap_sem);
 
	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
	/* Update VRMASD field in the LPCR */
	senc = slb_pgsize_encoding(psize);
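Worked example of the new rounding: a VRMA backed by 2 MiB pages has psize = 0x200000, which the old code rejected with -EINVAL because it is none of 4 KiB, 64 KiB, or 16 MiB; the new code rounds down to the nearest supported size instead. A sketch of the selection (not the kernel function):

	static unsigned long sketch_vrma_psize(unsigned long psize)
	{
	        if (psize >= 0x1000000)
	                return 0x1000000;       /* 16 MiB */
	        if (psize >= 0x10000)
	                return 0x10000;         /* 64 KiB -- chosen for 0x200000 */
	        return 0x1000;                  /* 4 KiB */
	}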
@@ -551,7 +551,7 @@ static int numa_setup_cpu(unsigned long lcpu)
	nid = of_node_to_nid_single(cpu);
 
 out_present:
-	if (nid < 0 || !node_online(nid))
+	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;
 
	map_cpu_to_node(lcpu, nid);
@@ -951,6 +951,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
 }
 
+static void __init find_possible_nodes(void)
+{
+	struct device_node *rtas;
+	u32 numnodes, i;
+
+	if (min_common_depth <= 0)
+		return;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return;
+
+	if (of_property_read_u32_index(rtas,
+				"ibm,max-associativity-domains",
+				min_common_depth, &numnodes))
+		goto out;
+
+	for (i = 0; i < numnodes; i++) {
+		if (!node_possible(i))
+			node_set(i, node_possible_map);
+	}
+
+out:
+	of_node_put(rtas);
+}
+
 void __init initmem_init(void)
 {
	int nid, cpu;
@@ -966,12 +992,15 @@ void __init initmem_init(void)
	memblock_dump_all();
 
	/*
-	 * Reduce the possible NUMA nodes to the online NUMA nodes,
-	 * since we do not support node hotplug. This ensures that we
-	 * lower the maximum NUMA node ID to what is actually present.
+	 * Modify the set of possible NUMA nodes to reflect information
+	 * available about the set of online nodes, and the set of nodes
+	 * that we expect to make use of for this platform's affinity
+	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);
 
+	find_possible_nodes();
+
	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
 
@@ -1304,6 +1333,40 @@ static long vphn_get_associativity(unsigned long cpu,
	return rc;
 }
 
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	int new_nid;
+
+	/* Use associativity from first thread for all siblings */
+	vphn_get_associativity(cpu, associativity);
+	new_nid = associativity_to_nid(associativity);
+	if (new_nid < 0 || !node_possible(new_nid))
+		new_nid = first_online_node;
+
+	if (NODE_DATA(new_nid) == NULL) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+		/*
+		 * Need to ensure that NODE_DATA is initialized for a node from
+		 * available memory (see memblock_alloc_try_nid). If unable to
+		 * init the node, then default to nearest node that has memory
+		 * installed.
+		 */
+		if (try_online_node(new_nid))
+			new_nid = first_online_node;
+#else
+		/*
+		 * Default to using the nearest node that has memory installed.
+		 * Otherwise, it would be necessary to patch the kernel MM code
+		 * to deal with more memoryless-node error conditions.
+		 */
+		new_nid = first_online_node;
+#endif
+	}
+
+	return new_nid;
+}
+
 /*
  * Update the CPU maps and sysfs entries for a single CPU when its NUMA
  * characteristics change. This function doesn't perform any locking and is
@@ -1369,7 +1432,6 @@ int arch_update_cpu_topology(void)
 {
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
-	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;
@@ -1404,11 +1466,7 @@ int arch_update_cpu_topology(void)
			continue;
		}
 
-		/* Use associativity from first thread for all siblings */
-		vphn_get_associativity(cpu, associativity);
-		new_nid = associativity_to_nid(associativity);
-		if (new_nid < 0 || !node_online(new_nid))
-			new_nid = first_online_node;
+		new_nid = find_and_online_cpu_nid(cpu);
 
		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
@@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
+		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+			PPC_LWZ_OFFS(r_A, r_skb, K);
+			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;
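The added case wires up the classic-BPF absolute word load for seccomp filters, where the "skb" register actually points at a struct seccomp_data, so the load is simply A = *(u32 *)(ctx + K). A C sketch of the semantics the JIT emits (helper name hypothetical):

	#include <string.h>

	static unsigned int sketch_seccomp_ld_abs(const void *data, unsigned int k)
	{
	        unsigned int a;

	        memcpy(&a, (const unsigned char *)data + k, sizeof(a)); /* A = data[K] */
	        return a;
	}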
@@ -448,6 +448,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
				/* invalid entry */
				continue;
 
+			/*
+			 * BHRB rolling buffer could very much contain the kernel
+			 * addresses at this point. Check the privileges before
+			 * exporting it to userspace (avoid exposure of regions
+			 * where we could have speculative execution)
+			 */
+			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
+			    is_kernel_addr(addr))
+				continue;
+
			/* Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
@@ -1188,6 +1198,7 @@ static void power_pmu_disable(struct pmu *pmu)
		 */
		write_mmcr0(cpuhw, val);
		mb();
+		isync();
 
		/*
		 * Disable instruction sampling if it was enabled
@@ -1196,12 +1207,26 @@ static void power_pmu_disable(struct pmu *pmu)
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
+			isync();
		}
 
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;
 
		ebb_switch_out(mmcr0);
+
+#ifdef CONFIG_PPC64
+		/*
+		 * These are readable by userspace, may contain kernel
+		 * addresses and are not switched by context switch, so clear
+		 * them now to avoid leaking anything to userspace in general
+		 * including to another process.
+		 */
+		if (ppmu->flags & PPMU_ARCH_207S) {
+			mtspr(SPRN_SDAR, 0);
+			mtspr(SPRN_SIAR, 0);
+		}
+#endif
	}
 
	local_irq_restore(flags);
@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
	int i;
	u32 mask = 0;
 
-	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
+	for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
	return mask;
 }
@@ -2,10 +2,15 @@
 #ifndef _ASM_S390_NOSPEC_ASM_H
 #define _ASM_S390_NOSPEC_ASM_H
 
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_EXPOLINE
 
+_LC_BR_R1 = __LC_BR_R1
+
 /*
  * The expoline macros are used to create thunks in the same format
  * as gcc generates them. The 'comdat' section flag makes sure that
@@ -101,13 +106,21 @@
	.endm
 
	.macro __THUNK_EX_BR reg,ruse
+	# Be very careful when adding instructions to this macro!
+	# The ALTERNATIVE replacement code has a .+10 which targets
+	# the "br \reg" after the code has been patched.
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
	exrl	0,555f
	j	.
 #else
+	.ifc \reg,%r1
+	ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+	j	.
+	.else
	larl	\ruse,555f
	ex	0,0(\ruse)
	j	.
+	.endif
 #endif
 555:	br	\reg
	.endm
@@ -170,6 +170,7 @@ int main(void)
	OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
	OFFSET(__LC_GMAP, _lowcore, gmap);
	OFFSET(__LC_PASTE, _lowcore, paste);
+	OFFSET(__LC_BR_R1, _lowcore, br_r1_trampoline);
	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
	OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
	/* hardware defined lowcore locations 0x1000 - 0x18ff */
@@ -8,12 +8,16 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 
+	GEN_BR_THUNK %r1
+	GEN_BR_THUNK %r14
+
	.section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
-	br	%r14
+	BR_EX	%r14
 
 #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
 #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
@@ -21,7 +25,7 @@ ENTRY(ftrace_stub)
 #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
 
 ENTRY(_mcount)
-	br	%r14
+	BR_EX	%r14
 
 ENTRY(ftrace_caller)
	.globl	ftrace_regs_caller
@@ -49,7 +53,7 @@ ENTRY(ftrace_caller)
 #endif
	lgr	%r3,%r14
	la	%r5,STACK_PTREGS(%r15)
-	basr	%r14,%r1
+	BASR_EX	%r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller.
@@ -64,7 +68,7 @@ ftrace_graph_caller_end:
 #endif
	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-	br	%r1
+	BR_EX	%r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -77,6 +81,6 @@ ENTRY(return_to_handler)
	aghi	%r15,STACK_FRAME_OVERHEAD
	lgr	%r14,%r2
	lmg	%r2,%r5,32(%r15)
-	br	%r14
+	BR_EX	%r14
 
 #endif
@@ -255,7 +255,7 @@ debug_trap:
	mov.l	@r8, r8
	jsr	@r8
	 nop
-	bra	__restore_all
+	bra	ret_from_exception
	 nop
	CFI_ENDPROC
 
@@ -74,7 +74,11 @@ ATOMIC_OP(xor)
 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -1368,7 +1368,7 @@ void setup_local_APIC(void)
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-	if (!cpu && (pic_mode || !value)) {
+	if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
	} else {
@@ -11,6 +11,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
+#include <linux/libfdt.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/of_pci.h>
@@ -199,19 +200,22 @@ static struct of_ioapic_type of_ioapic_type[] =
 static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
 {
-	struct of_phandle_args *irq_data = (void *)arg;
+	struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
	struct of_ioapic_type *it;
	struct irq_alloc_info tmp;
+	int type_index;
 
-	if (WARN_ON(irq_data->args_count < 2))
-		return -EINVAL;
-	if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
+	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;
 
-	it = &of_ioapic_type[irq_data->args[1]];
+	type_index = fwspec->param[1];
+	if (type_index >= ARRAY_SIZE(of_ioapic_type))
+		return -EINVAL;
+
+	it = &of_ioapic_type[type_index];
	ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
	tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
-	tmp.ioapic_pin = irq_data->args[0];
+	tmp.ioapic_pin = fwspec->param[0];
 
	return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
 }
@@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void)
 
	map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
 
-	initial_boot_params = dt = early_memremap(initial_dtb, map_len);
-	size = of_get_flat_dt_size();
+	dt = early_memremap(initial_dtb, map_len);
+	size = fdt_totalsize(dt);
	if (map_len < size) {
		early_memunmap(dt, map_len);
-		initial_boot_params = dt = early_memremap(initial_dtb, size);
+		dt = early_memremap(initial_dtb, size);
		map_len = size;
	}
 
+	early_init_dt_verify(dt);
	unflatten_and_copy_device_tree();
	early_memunmap(dt, map_len);
 }
@@ -1344,6 +1344,7 @@ static void remove_siblinginfo(int cpu)
	cpumask_clear(topology_core_cpumask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
+	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
@@ -288,8 +288,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
	if (!kvm_vcpu_has_lapic(vcpu))
		return;
 
+	/*
+	 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
+	 * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
+	 * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
+	 * version first and level-triggered interrupts never get EOIed in
+	 * IOAPIC.
+	 */
	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
-	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
+	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
+	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	apic_set_reg(apic, APIC_LVR, v);
 }
@@ -2319,6 +2319,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
		return;
	}
 
+	WARN_ON_ONCE(vmx->emulation_required);
+
	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
@@ -6037,12 +6039,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
			goto out;
		}
 
-		if (err != EMULATE_DONE) {
-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-			vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-			vcpu->run->internal.ndata = 0;
-			return 0;
-		}
+		if (err != EMULATE_DONE)
+			goto emulation_error;
+
+		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
+		    vcpu->arch.exception.pending)
+			goto emulation_error;
 
		if (vcpu->arch.halt_request) {
			vcpu->arch.halt_request = 0;
@@ -6058,6 +6060,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 
 out:
	return ret;
+
+emulation_error:
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+	return 0;
 }
 
 static int __grow_ple_window(int val)
@@ -3973,13 +3973,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
		mutex_unlock(&kvm->lock);
		break;
	case KVM_XEN_HVM_CONFIG: {
+		struct kvm_xen_hvm_config xhc;
		r = -EFAULT;
-		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
-				   sizeof(struct kvm_xen_hvm_config)))
+		if (copy_from_user(&xhc, argp, sizeof(xhc)))
			goto out;
		r = -EINVAL;
-		if (kvm->arch.xen_hvm_config.flags)
+		if (xhc.flags)
			goto out;
+		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
		r = 0;
		break;
	}
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
@@ -600,6 +601,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;
 
+	/* Bail out if we are we on a populated non-leaf entry: */
+	if (pud_present(*pud) && !pud_huge(*pud))
+		return 0;
+
	prot = pgprot_4k_2_large(prot);
 
	set_pte((pte_t *)pud, pfn_pte(
@@ -628,6 +633,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
		return 0;
	}
 
+	/* Bail out if we are we on a populated non-leaf entry: */
+	if (pmd_present(*pmd) && !pmd_huge(*pmd))
+		return 0;
+
	prot = pgprot_4k_2_large(prot);
 
	set_pte((pte_t *)pmd, pfn_pte(
@@ -142,7 +142,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
 #endif
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
	int error;
 
@@ -78,7 +78,7 @@ static int set_up_temporary_mappings(void)
	return 0;
 }
 
-int swsusp_arch_resume(void)
+asmlinkage int swsusp_arch_resume(void)
 {
	int error;
 
@@ -108,6 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
	cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
+		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
@@ -125,6 +126,8 @@ static void round_robin_cpu(unsigned int tsk_index)
	mutex_unlock(&round_robin_lock);
 
	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+
+	free_cpumask_var(tmp);
 }
 
 static void exit_round_robin(unsigned int tsk_index)
@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
	u32 fixed_status;
	u32 fixed_enable;
	u32 i;
+	acpi_status status;
 
	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
 
@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
	 * Read the fixed feature status and enable registers, as all the cases
	 * depend on their values. Ignore errors here.
	 */
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+	status |=
+	    acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+	if (ACPI_FAILURE(status)) {
+		return (int_status);
+	}
 
	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Fixed Event Block: Enable %08X Status %08X\n",
@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
 
		status = AE_OK;
+	} else if (ACPI_FAILURE(status)) {
+
+		/* If return_object exists, delete it */
+
+		if (info->return_object) {
+			acpi_ut_remove_reference(info->return_object);
+			info->return_object = NULL;
+		}
	}
 
	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
@@ -161,7 +161,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 {
	int ret;
 
-	if (ignore_ppc) {
+	if (ignore_ppc || !pr->performance) {
		/*
		 * Only when it is notification event, the _OST object
		 * will be evaluated. Otherwise it is skipped.
|
|||
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
|
||||
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
|
||||
|
||||
/* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
|
||||
SD7SN6S256G and SD8SN8U256G */
|
||||
{ "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
|
||||
|
||||
/* devices which puke on READ_NATIVE_MAX */
|
||||
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
|
||||
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
|
||||
|
@ -4247,6 +4251,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|||
{ "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
|
||||
|
||||
/* devices that don't properly handle queued TRIM commands */
|
||||
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
|
|
|
@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
|
|||
struct pcd_unit *cd = bdev->bd_disk->private_data;
|
||||
int ret;
|
||||
|
||||
check_disk_change(bdev);
|
||||
|
||||
mutex_lock(&pcd_mutex);
|
||||
ret = cdrom_open(&cd->info, bdev, mode);
|
||||
mutex_unlock(&pcd_mutex);
|
||||
|
|
|
@ -336,6 +336,9 @@ static const struct usb_device_id blacklist_table[] = {
|
|||
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
|
||||
{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
|
||||
|
||||
/* Additional Realtek 8723BU Bluetooth devices */
|
||||
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
|
||||
|
||||
/* Additional Realtek 8821AE Bluetooth devices */
|
||||
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
|
||||
{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
|
||||
|
@ -343,6 +346,9 @@ static const struct usb_device_id blacklist_table[] = {
|
|||
{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
|
||||
{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
|
||||
|
||||
/* Additional Realtek 8822BE Bluetooth devices */
|
||||
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
|
||||
|
||||
/* Silicon Wave based devices */
|
||||
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
|
||||
|
||||
|
|
|
@@ -1154,9 +1154,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
 
	cd_dbg(CD_OPEN, "entering cdrom_open\n");
 
-	/* open is event synchronization point, check events first */
-	check_disk_change(bdev);
-
	/* if this was a O_NONBLOCK open and we should honor the flags,
	 * do a quick open without drive/disc integrity checks. */
	cdi->use_count++;
|
|||
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
|
||||
{
|
||||
int ret;
|
||||
|
||||
check_disk_change(bdev);
|
||||
|
||||
mutex_lock(&gdrom_mutex);
|
||||
ret = cdrom_open(gd.cd_info, bdev, mode);
|
||||
mutex_unlock(&gdrom_mutex);
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#define RNG_CR 0x00
|
||||
|
@ -46,6 +47,7 @@ struct stm32_rng_private {
|
|||
struct hwrng rng;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
struct reset_control *rst;
|
||||
};
|
||||
|
||||
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||
|
@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
|
|||
if (IS_ERR(priv->clk))
|
||||
return PTR_ERR(priv->clk);
|
||||
|
||||
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
|
||||
if (!IS_ERR(priv->rst)) {
|
||||
reset_control_assert(priv->rst);
|
||||
udelay(2);
|
||||
reset_control_deassert(priv->rst);
|
||||
}
|
||||
|
||||
dev_set_drvdata(dev, priv);
|
||||
|
||||
priv->rng.name = dev_driver_string(dev),
|
||||
|
|
|
@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
|
|||
ipmi->irq = opal_event_request(prop);
|
||||
}
|
||||
|
||||
if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
|
||||
"opal-ipmi", ipmi)) {
|
||||
rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
|
||||
"opal-ipmi", ipmi);
|
||||
if (rc) {
|
||||
dev_warn(dev, "Unable to request irq\n");
|
||||
goto err_dispose;
|
||||
}
|
||||
|
|
|
@ -757,7 +757,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|||
ssif_info->ssif_state = SSIF_NORMAL;
|
||||
ipmi_ssif_unlock_cond(ssif_info, flags);
|
||||
pr_warn(PFX "Error getting flags: %d %d, %x\n",
|
||||
result, len, data[2]);
|
||||
result, len, (len >= 3) ? data[2] : 0);
|
||||
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|
||||
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
|
||||
/*
|
||||
|
@ -779,7 +779,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
|
|||
if ((result < 0) || (len < 3) || (data[2] != 0)) {
|
||||
/* Error clearing flags */
|
||||
pr_warn(PFX "Error clearing flags: %d %d, %x\n",
|
||||
result, len, data[2]);
|
||||
result, len, (len >= 3) ? data[2] : 0);
|
||||
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|
||||
|| data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
|
||||
pr_warn(PFX "Invalid response clearing flags: %x %x\n",
|
||||
|
|
|
@ -2244,6 +2244,9 @@ static int clk_core_get_phase(struct clk_core *core)
|
|||
int ret;
|
||||
|
||||
clk_prepare_lock();
|
||||
/* Always try to update cached phase if possible */
|
||||
if (core->ops->get_phase)
|
||||
core->phase = core->ops->get_phase(core->hw);
|
||||
ret = core->phase;
|
||||
clk_prepare_unlock();
|
||||
|
||||
|
|
|
@ -60,6 +60,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
|
|||
u16 degrees;
|
||||
u32 delay_num = 0;
|
||||
|
||||
/* See the comment for rockchip_mmc_set_phase below */
|
||||
if (!rate) {
|
||||
pr_err("%s: invalid clk rate\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
|
||||
|
||||
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
|
||||
|
@ -86,6 +92,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
|
|||
u32 raw_value;
|
||||
u32 delay;
|
||||
|
||||
/*
|
||||
* The below calculation is based on the output clock from
|
||||
* MMC host to the card, which expects the phase clock inherits
|
||||
* the clock rate from its parent, namely the output clock
|
||||
* provider of MMC host. However, things may go wrong if
|
||||
* (1) It is orphan.
|
||||
* (2) It is assigned to the wrong parent.
|
||||
*
|
||||
* This check help debug the case (1), which seems to be the
|
||||
* most likely problem we often face and which makes it difficult
|
||||
* for people to debug unstable mmc tuning results.
|
||||
*/
|
||||
if (!rate) {
|
||||
pr_err("%s: invalid clk rate\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nineties = degrees / 90;
|
||||
remainder = (degrees % 90);
|
||||
|
||||
|
|
|
@ -683,7 +683,7 @@ static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
|
|||
PLL_36XX_RATE(144000000, 96, 2, 3, 0),
|
||||
PLL_36XX_RATE( 96000000, 128, 2, 4, 0),
|
||||
PLL_36XX_RATE( 84000000, 112, 2, 4, 0),
|
||||
PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
|
||||
PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
|
||||
PLL_36XX_RATE( 73728000, 98, 2, 4, 19923),
|
||||
PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
|
||||
PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
|
||||
|
@ -719,7 +719,7 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
|
|||
PLL_36XX_RATE(148352005, 98, 2, 3, 59070),
|
||||
PLL_36XX_RATE(108000000, 144, 2, 4, 0),
|
||||
PLL_36XX_RATE( 74250000, 99, 2, 4, 0),
|
||||
PLL_36XX_RATE( 74176002, 98, 3, 4, 59070),
|
||||
PLL_36XX_RATE( 74176002, 98, 2, 4, 59070),
|
||||
PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
|
||||
PLL_36XX_RATE( 54000000, 144, 2, 5, 0),
|
||||
{ /* sentinel */ }
|
||||
|
|
|
@ -711,13 +711,13 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
|
|||
/* sorted in descending order */
|
||||
/* PLL_36XX_RATE(rate, m, p, s, k) */
|
||||
PLL_36XX_RATE(192000000, 64, 2, 2, 0),
|
||||
PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
|
||||
PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
|
||||
PLL_36XX_RATE(180000000, 90, 3, 2, 0),
|
||||
PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
|
||||
PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
|
||||
PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
|
||||
PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
|
||||
PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
|
||||
PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
|
||||
PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
|
||||
PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
|
||||
{ },
|
||||
};
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
|
|||
PLL_36XX_RATE(480000000, 160, 2, 2, 0),
|
||||
PLL_36XX_RATE(432000000, 144, 2, 2, 0),
|
||||
PLL_36XX_RATE(400000000, 200, 3, 2, 0),
|
||||
PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
|
||||
PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
|
||||
PLL_36XX_RATE(333000000, 111, 2, 2, 0),
|
||||
PLL_36XX_RATE(300000000, 100, 2, 2, 0),
|
||||
PLL_36XX_RATE(266000000, 266, 3, 3, 0),
|
||||
|
|
|
@ -747,7 +747,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
|
|||
PLL_35XX_RATE(800000000U, 400, 6, 1),
|
||||
PLL_35XX_RATE(733000000U, 733, 12, 1),
|
||||
PLL_35XX_RATE(700000000U, 175, 3, 1),
|
||||
PLL_35XX_RATE(667000000U, 222, 4, 1),
|
||||
PLL_35XX_RATE(666000000U, 222, 4, 1),
|
||||
PLL_35XX_RATE(633000000U, 211, 4, 1),
|
||||
PLL_35XX_RATE(600000000U, 500, 5, 2),
|
||||
PLL_35XX_RATE(552000000U, 460, 5, 2),
|
||||
|
@ -773,12 +773,12 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
|
|||
/* AUD_PLL */
|
||||
static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
|
||||
PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
|
||||
PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
|
||||
PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
|
||||
PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
|
||||
PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
|
||||
PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
|
||||
PLL_36XX_RATE(338688000U, 113, 2, 2, -6816),
|
||||
PLL_36XX_RATE(294912000U, 98, 1, 3, 19923),
|
||||
PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
|
||||
PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
|
||||
PLL_36XX_RATE(338687988U, 113, 2, 2, -6816),
|
||||
PLL_36XX_RATE(294912002U, 98, 1, 3, 19923),
|
||||
PLL_36XX_RATE(288000000U, 96, 1, 3, 0),
|
||||
PLL_36XX_RATE(252000000U, 84, 1, 3, 0),
|
||||
{ /* sentinel */ }
|
||||
|
|
|
@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
|
|||
PLL_35XX_RATE(226000000, 105, 1, 1),
|
||||
PLL_35XX_RATE(210000000, 132, 2, 1),
|
||||
/* 2410 common */
|
||||
PLL_35XX_RATE(203000000, 161, 3, 1),
|
||||
PLL_35XX_RATE(202800000, 161, 3, 1),
|
||||
PLL_35XX_RATE(192000000, 88, 1, 1),
|
||||
PLL_35XX_RATE(186000000, 85, 1, 1),
|
||||
PLL_35XX_RATE(180000000, 82, 1, 1),
|
||||
|
@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
|
|||
PLL_35XX_RATE(147000000, 90, 2, 1),
|
||||
PLL_35XX_RATE(135000000, 82, 2, 1),
|
||||
PLL_35XX_RATE(124000000, 116, 1, 2),
|
||||
PLL_35XX_RATE(118000000, 150, 2, 2),
|
||||
PLL_35XX_RATE(118500000, 150, 2, 2),
|
||||
PLL_35XX_RATE(113000000, 105, 1, 2),
|
||||
PLL_35XX_RATE(101000000, 127, 2, 2),
|
||||
PLL_35XX_RATE(101250000, 127, 2, 2),
|
||||
PLL_35XX_RATE(90000000, 112, 2, 2),
|
||||
PLL_35XX_RATE(85000000, 105, 2, 2),
|
||||
PLL_35XX_RATE(84750000, 105, 2, 2),
|
||||
PLL_35XX_RATE(79000000, 71, 1, 2),
|
||||
PLL_35XX_RATE(68000000, 82, 2, 2),
|
||||
PLL_35XX_RATE(56000000, 142, 2, 3),
|
||||
PLL_35XX_RATE(67500000, 82, 2, 2),
|
||||
PLL_35XX_RATE(56250000, 142, 2, 3),
|
||||
PLL_35XX_RATE(48000000, 120, 2, 3),
|
||||
PLL_35XX_RATE(51000000, 161, 3, 3),
|
||||
PLL_35XX_RATE(50700000, 161, 3, 3),
|
||||
PLL_35XX_RATE(45000000, 82, 1, 3),
|
||||
PLL_35XX_RATE(34000000, 82, 2, 3),
|
||||
PLL_35XX_RATE(33750000, 82, 2, 3),
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
|
||||
|
|
|
@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
|
|||
|
||||
static unsigned long __init ftm_clk_init(struct device_node *np)
|
||||
{
|
||||
unsigned long freq;
|
||||
long freq;
|
||||
|
||||
freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
|
||||
if (freq <= 0)
|
||||
|
|
|
@ -100,9 +100,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
|||
policy->cpuinfo.max_freq = policy->max;
|
||||
policy->shared_type = cpu->shared_type;
|
||||
|
||||
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
|
||||
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
|
||||
int i;
|
||||
|
||||
cpumask_copy(policy->cpus, cpu->shared_cpu_map);
|
||||
else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
|
||||
|
||||
for_each_cpu(i, policy->cpus) {
|
||||
if (unlikely(i == policy->cpu))
|
||||
continue;
|
||||
|
||||
memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
|
||||
sizeof(cpu->perf_caps));
|
||||
}
|
||||
} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
|
||||
/* Support only SW_ANY for now. */
|
||||
pr_debug("Unsupported CPU co-ord type\n");
|
||||
return -EFAULT;
|
||||
|
@ -166,8 +176,13 @@ static int __init cppc_cpufreq_init(void)
|
|||
return ret;
|
||||
|
||||
out:
|
||||
for_each_possible_cpu(i)
|
||||
kfree(all_cpu_data[i]);
|
||||
for_each_possible_cpu(i) {
|
||||
cpu = all_cpu_data[i];
|
||||
if (!cpu)
|
||||
break;
|
||||
free_cpumask_var(cpu->shared_cpu_map);
|
||||
kfree(cpu);
|
||||
}
|
||||
|
||||
kfree(all_cpu_data);
|
||||
return -ENODEV;
|
||||
|
|
|
@ -422,6 +422,7 @@ static struct platform_driver sun4i_ss_driver = {
|
|||
|
||||
module_platform_driver(sun4i_ss_driver);
|
||||
|
||||
MODULE_ALIAS("platform:sun4i-ss");
|
||||
MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
|
||||
|
|
|
@ -1533,7 +1533,7 @@ static void pl330_dotask(unsigned long data)
|
|||
/* Returns 1 if state was updated, 0 otherwise */
|
||||
static int pl330_update(struct pl330_dmac *pl330)
|
||||
{
|
||||
struct dma_pl330_desc *descdone, *tmp;
|
||||
struct dma_pl330_desc *descdone;
|
||||
unsigned long flags;
|
||||
void __iomem *regs;
|
||||
u32 val;
|
||||
|
@ -1611,7 +1611,9 @@ static int pl330_update(struct pl330_dmac *pl330)
|
|||
}
|
||||
|
||||
/* Now that we are in no hurry, do the callbacks */
|
||||
list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
|
||||
while (!list_empty(&pl330->req_done)) {
|
||||
descdone = list_first_entry(&pl330->req_done,
|
||||
struct dma_pl330_desc, rqd);
|
||||
list_del(&descdone->rqd);
|
||||
spin_unlock_irqrestore(&pl330->lock, flags);
|
||||
dma_pl330_rqcb(descdone, PL330_ERR_NONE);
|
||||
|
|
|
@ -851,7 +851,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
|
|||
|
||||
rcar_dmac_chan_configure_desc(chan, desc);
|
||||
|
||||
max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
|
||||
max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
|
||||
|
||||
/*
|
||||
* Allocate and fill the transfer chunk descriptors. We own the only
|
||||
|
|
|
@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx)
|
|||
return -ENOMEM;
|
||||
|
||||
offset = (void *)&desc->buffer - (void *)desc;
|
||||
desc->buffer_size = PAGE_SIZE - offset;
|
||||
/*
|
||||
* Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
|
||||
* for descriptors, even 0x10-byte ones. This can cause page faults when
|
||||
* an IOMMU is in use and the oversized read crosses a page boundary.
|
||||
* Work around this by always leaving at least 0x10 bytes of padding.
|
||||
*/
|
||||
desc->buffer_size = PAGE_SIZE - offset - 0x10;
|
||||
desc->buffer_bus = bus_addr + offset;
|
||||
desc->used = 0;
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
|
|||
* of and an antecedent to, SMBIOS, which stands for System
|
||||
* Management BIOS. See further: http://www.dmtf.org/standards
|
||||
*/
|
||||
static const char dmi_empty_string[] = " ";
|
||||
static const char dmi_empty_string[] = "";
|
||||
|
||||
static u32 dmi_ver __initdata;
|
||||
static u32 dmi_len;
|
||||
|
@ -44,25 +44,21 @@ static int dmi_memdev_nr;
|
|||
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
|
||||
{
|
||||
const u8 *bp = ((u8 *) dm) + dm->length;
|
||||
const u8 *nsp;
|
||||
|
||||
if (s) {
|
||||
s--;
|
||||
while (s > 0 && *bp) {
|
||||
while (--s > 0 && *bp)
|
||||
bp += strlen(bp) + 1;
|
||||
s--;
|
||||
}
|
||||
|
||||
if (*bp != 0) {
|
||||
size_t len = strlen(bp)+1;
|
||||
size_t cmp_len = len > 8 ? 8 : len;
|
||||
|
||||
if (!memcmp(bp, dmi_empty_string, cmp_len))
|
||||
return dmi_empty_string;
|
||||
/* Strings containing only spaces are considered empty */
|
||||
nsp = bp;
|
||||
while (*nsp == ' ')
|
||||
nsp++;
|
||||
if (*nsp != '\0')
|
||||
return bp;
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
return dmi_empty_string;
|
||||
}
|
||||
|
||||
static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
|
||||
|
|
|
@@ -569,7 +569,7 @@
 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)
 #define EXYNOS_CIIMGEFF_FIN_MASK		(7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
 
 /* Real input DMA size register */
 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31)

@@ -67,7 +67,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
 			     obj->size, &rk_obj->dma_attrs);
@@ -99,6 +98,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (ret)
 		return ret;
 
+	/*
+	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+	 * whole buffer from the start.
+	 */
+	vma->vm_pgoff = 0;
+
 	obj = vma->vm_private_data;
 
 	return rockchip_drm_gem_object_mmap(obj, vma);

@@ -208,6 +208,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_3D_FEATURES:
 		value = vgdev->has_virgl_3d == true ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+		value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -483,7 +486,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_get_caps *args = data;
-	int size;
+	unsigned size, host_caps_size;
 	int i;
 	int found_valid = -1;
 	int ret;
@@ -492,6 +495,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	if (vgdev->num_capsets == 0)
 		return -ENOSYS;
 
+	/* don't allow userspace to pass 0 */
+	if (args->size == 0)
+		return -EINVAL;
+
 	spin_lock(&vgdev->display_info_lock);
 	for (i = 0; i < vgdev->num_capsets; i++) {
 		if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -507,11 +514,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	size = vgdev->capsets[found_valid].max_size;
-	if (args->size > size) {
-		spin_unlock(&vgdev->display_info_lock);
-		return -EINVAL;
-	}
+	host_caps_size = vgdev->capsets[found_valid].max_size;
+	/* only copy to user the minimum of the host caps size or the guest caps size */
+	size = min(args->size, host_caps_size);
 
 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&

@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
 static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
 		uint new_profile_index)
 {
+	if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+		return;
 	kovaplus->actual_profile = new_profile_index;
 	kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
 	kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;

@@ -1390,7 +1390,7 @@ static void nct6775_update_pwm(struct device *dev)
 			duty_is_dc = data->REG_PWM_MODE[i] &&
 				     (nct6775_read_value(data, data->REG_PWM_MODE[i])
 				      & data->PWM_MODE_MASK[i]);
-			data->pwm_mode[i] = duty_is_dc;
+			data->pwm_mode[i] = !duty_is_dc;
 
 			fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
 			for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
@@ -2267,7 +2267,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
 	struct nct6775_data *data = nct6775_update_device(dev);
 	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
 
-	return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
+	return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
 }
 
 static ssize_t
@@ -2288,9 +2288,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 	if (val > 1)
 		return -EINVAL;
 
-	/* Setting DC mode is not supported for all chips/channels */
+	/* Setting DC mode (0) is not supported for all chips/channels */
 	if (data->REG_PWM_MODE[nr] == 0) {
-		if (val)
+		if (!val)
 			return -EINVAL;
 		return count;
 	}
@@ -2299,7 +2299,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 	data->pwm_mode[nr] = val;
 	reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
 	reg &= ~data->PWM_MODE_MASK[nr];
-	if (val)
+	if (!val)
 		reg |= data->PWM_MODE_MASK[nr];
 	nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
 	mutex_unlock(&data->update_lock);

@@ -141,7 +141,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret = 0;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {
@@ -218,7 +218,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
 	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {

@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
 {
 	int ret;
 
-	if (page)
+	if (page > 0)
 		return -ENXIO;
 
 	switch (reg) {

@@ -856,12 +856,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 	 */
 	if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
 		drv_data->offload_enabled = true;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
 		drv_data->offload_enabled = false;
-		drv_data->errata_delay = true;
+		/* The delay is only needed in standard mode (100kHz) */
+		if (bus_freq <= 100000)
+			drv_data->errata_delay = true;
 	}
 
 	if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))

@@ -1593,6 +1593,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
 	struct cdrom_info *info;
 	int rc = -ENXIO;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&ide_cd_mutex);
 	info = ide_cd_get(bdev->bd_disk);
 	if (!info)

@@ -1295,7 +1295,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
 		return -EINVAL;
 
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,

@@ -3161,12 +3161,9 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 	int err;
 
 	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
-	if (err) {
+	if (err)
 		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
-		return err;
-	}
 
 	kfree(xrcd);
-
 	return 0;
 }

@@ -1953,6 +1953,9 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto event_failed;
 	}
 
+	/* call event handler to ensure pkey in sync */
+	queue_work(ipoib_workqueue, &priv->flush_heavy);
+
 	result = register_netdev(priv->dev);
 	if (result) {
 		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",

@@ -750,7 +750,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
 	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
 	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
 
-	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
 	gic_write_sgi1r(val);
 }
 

@@ -285,8 +285,10 @@ do {							\
 			break;					\
 							\
 		mutex_unlock(&(ca)->set->bucket_lock);	\
-		if (kthread_should_stop())		\
+		if (kthread_should_stop()) {		\
+			set_current_state(TASK_RUNNING);	\
 			return 0;			\
+		}					\
 							\
 		try_to_freeze();			\
 		schedule();				\

@@ -904,7 +904,7 @@ void bcache_write_super(struct cache_set *);
 
 int bch_flash_dev_create(struct cache_set *c, uint64_t size);
 
-int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
+int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
 void bch_cached_dev_detach(struct cached_dev *);
 void bch_cached_dev_run(struct cached_dev *);
 void bcache_device_stop(struct bcache_device *);

@@ -1869,14 +1869,17 @@ void bch_initial_gc_finish(struct cache_set *c)
 	 */
 	for_each_cache(ca, c, i) {
 		for_each_bucket(b, ca) {
-			if (fifo_full(&ca->free[RESERVE_PRIO]))
+			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+			    fifo_full(&ca->free[RESERVE_BTREE]))
 				break;
 
 			if (bch_can_invalidate_bucket(ca, b) &&
 			    !GC_MARK(b)) {
 				__bch_invalidate_one_bucket(ca, b);
-				fifo_push(&ca->free[RESERVE_PRIO],
-					  b - ca->buckets);
+				if (!fifo_push(&ca->free[RESERVE_PRIO],
+				    b - ca->buckets))
+					fifo_push(&ca->free[RESERVE_BTREE],
+						  b - ca->buckets);
 			}
 		}
 	}

@@ -633,11 +633,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
-	bio_complete(s);
 
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
+	bio_complete(s);
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }

@@ -936,7 +936,8 @@ void bch_cached_dev_detach(struct cached_dev *dc)
 	cached_dev_put(dc);
 }
 
-int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+			  uint8_t *set_uuid)
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
@@ -945,7 +946,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
 
 	bdevname(dc->bdev, buf);
 
-	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
+	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
+	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
@@ -1189,7 +1191,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
-		bch_cached_dev_attach(dc, c);
+		bch_cached_dev_attach(dc, c, NULL);
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
 	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
@@ -1711,7 +1713,7 @@ static void run_cache_set(struct cache_set *c)
 		bcache_write_super(c);
 
 		list_for_each_entry_safe(dc, t, &uncached_devices, list)
-			bch_cached_dev_attach(dc, c);
+			bch_cached_dev_attach(dc, c, NULL);
 
 		flash_devs_run(c);
 
@@ -1828,6 +1830,7 @@ void bch_cache_release(struct kobject *kobj)
 static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 {
 	size_t free;
	size_t btree_buckets;
 	struct bucket *b;
 
 	__module_get(THIS_MODULE);
@@ -1837,9 +1840,19 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
+	/*
+	 * when ca->sb.njournal_buckets is not zero, journal exists,
+	 * and in bch_journal_replay(), tree node may split,
+	 * so bucket of RESERVE_BTREE type is needed,
+	 * the worst situation is all journal buckets are valid journal,
+	 * and all the keys need to replay,
+	 * so the number of RESERVE_BTREE type buckets should be as much
+	 * as journal buckets
+	 */
+	btree_buckets = ca->sb.njournal_buckets ?: 8;
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
 	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||

@@ -191,7 +191,7 @@ STORE(__cached_dev)
 {
 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
 					     disk.kobj);
-	ssize_t v = size;
+	ssize_t v;
 	struct cache_set *c;
 	struct kobj_uevent_env *env;
 
@@ -263,17 +263,20 @@ STORE(__cached_dev)
 	}
 
 	if (attr == &sysfs_attach) {
-		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
+		uint8_t set_uuid[16];
+
+		if (bch_parse_uuid(buf, set_uuid) < 16)
 			return -EINVAL;
 
+		v = -ENOENT;
 		list_for_each_entry(c, &bch_cache_sets, list) {
-			v = bch_cached_dev_attach(dc, c);
+			v = bch_cached_dev_attach(dc, c, set_uuid);
 			if (!v)
 				return size;
 		}
 
 		pr_err("Can't attach %s: cache set not found", buf);
-		size = v;
+		return v;
 	}
 
 	if (attr == &sysfs_detach && dc->disk.c)

@@ -425,19 +425,28 @@ static int bch_writeback_thread(void *arg)
 
 	while (!kthread_should_stop()) {
 		down_write(&dc->writeback_lock);
-		if (!atomic_read(&dc->has_dirty) ||
-		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
-		     !dc->writeback_running)) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		/*
+		 * If the bache device is detaching, skip here and continue
+		 * to perform writeback. Otherwise, if no dirty data on cache,
+		 * or there is dirty data on cache but writeback is disabled,
+		 * the writeback thread should sleep here and wait for others
+		 * to wake up it.
+		 */
+		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
 			up_write(&dc->writeback_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
 
-			if (kthread_should_stop())
+			if (kthread_should_stop()) {
+				set_current_state(TASK_RUNNING);
 				return 0;
+			}
 
 			try_to_freeze();
 			schedule();
 			continue;
 		}
+		set_current_state(TASK_RUNNING);
 
 		searched_full_index = refill_dirty(dc);
 
@@ -447,6 +456,14 @@ static int bch_writeback_thread(void *arg)
 			cached_dev_put(dc);
 			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
 			bch_write_bdev_super(dc, NULL);
+			/*
+			 * If bcache device is detaching via sysfs interface,
+			 * writeback thread should stop after there is no dirty
+			 * data on cache. BCACHE_DEV_DETACHING flag is set in
+			 * bch_cached_dev_detach().
+			 */
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+				break;
 		}
 
 		up_write(&dc->writeback_lock);

@@ -1686,6 +1686,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 			struct md_rdev *repl =
 				conf->mirrors[conf->raid_disks + number].rdev;
 			freeze_array(conf, 0);
+			if (atomic_read(&repl->nr_pending)) {
+				/* It means that some queued IO of retry_list
+				 * hold repl. Thus, we cannot set replacement
+				 * as NULL, avoiding rdev NULL pointer
+				 * dereference in sync_request_write and
+				 * handle_write_finished.
+				 */
+				err = -EBUSY;
+				unfreeze_array(conf);
+				goto abort;
+			}
 			clear_bit(Replacement, &repl->flags);
 			p->rdev = repl;
 			conf->mirrors[conf->raid_disks + number].rdev = NULL;

@@ -2630,7 +2630,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 		for (m = 0; m < conf->copies; m++) {
 			int dev = r10_bio->devs[m].devnum;
 			rdev = conf->mirrors[dev].rdev;
-			if (r10_bio->devs[m].bio == NULL)
+			if (r10_bio->devs[m].bio == NULL ||
+				r10_bio->devs[m].bio->bi_end_io == NULL)
 				continue;
 			if (!r10_bio->devs[m].bio->bi_error) {
 				rdev_clear_badblocks(
@@ -2645,7 +2646,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 					md_error(conf->mddev, rdev);
 			}
 			rdev = conf->mirrors[dev].replacement;
-			if (r10_bio->devs[m].repl_bio == NULL)
+			if (r10_bio->devs[m].repl_bio == NULL ||
+				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
 				continue;
 
 			if (!r10_bio->devs[m].repl_bio->bi_error) {

@@ -2028,15 +2028,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 static int grow_stripes(struct r5conf *conf, int num)
 {
 	struct kmem_cache *sc;
+	size_t namelen = sizeof(conf->cache_name[0]);
 	int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
 	if (conf->mddev->gendisk)
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%s", conf->level, mdname(conf->mddev));
 	else
-		sprintf(conf->cache_name[0],
+		snprintf(conf->cache_name[0], namelen,
 			"raid%d-%p", conf->level, conf->mddev);
-	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+	snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
 	conf->active_name = 0;
 	sc = kmem_cache_create(conf->cache_name[conf->active_name],

@@ -2124,6 +2124,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 			&dev->i2c_bus[2].i2c_adap,
 			"cx25840", 0x88 >> 1, NULL);
 		if (dev->sd_cx25840) {
+			/* set host data for clk_freq configuration */
+			v4l2_set_subdev_hostdata(dev->sd_cx25840,
+						&dev->clk_freq);
+
 			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
 			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
 		}

@@ -872,6 +872,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 	if (cx23885_boards[dev->board].clk_freq > 0)
 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
 
+	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
+		dev->pci->subsystem_device == 0x7137) {
+		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
+		 * with an 888, and a 25Mhz crystal, instead of the
+		 * usual third overtone 50Mhz. The default clock rate must
+		 * be overridden so the cx25840 is properly configured
+		 */
+		dev->clk_freq = 25000000;
+	}
+
 	dev->pci_bus  = dev->pci->bus->number;
 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
 	cx23885_irq_add(dev, 0x001f00);

@@ -871,6 +871,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
 	dev->nr = ++cx25821_devcount;
 	sprintf(dev->name, "cx25821[%d]", dev->nr);
 
+	if (dev->nr >= ARRAY_SIZE(card)) {
+		CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
+		return -ENODEV;
+	}
 	if (dev->pci->device != 0x8210) {
 		pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
 			__func__, dev->pci->device);
@@ -886,9 +890,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
 		dev->channels[i].sram_channels = &cx25821_sram_channels[i];
 	}
 
-	if (dev->nr > 1)
-		CX25821_INFO("dev->nr > 1!");
-
 	/* board config */
 	dev->board = 1;		/* card[dev->nr]; */
 	dev->_max_num_decoders = MAX_DECODERS;

@@ -1268,16 +1268,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
 {
 	const struct s3c_camif_variant *variant = camif->variant;
 	const struct vp_pix_limits *pix_lim;
-	int i = ARRAY_SIZE(camif_mbus_formats);
+	unsigned int i;
 
 	/* FIXME: constraints against codec or preview path ? */
 	pix_lim = &variant->vp_pix_limits[VP_CODEC];
 
-	while (i-- >= 0)
+	for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
 		if (camif_mbus_formats[i] == mf->code)
 			break;
 
-	mf->code = camif_mbus_formats[i];
+	if (i == ARRAY_SIZE(camif_mbus_formats))
+		mf->code = camif_mbus_formats[0];
 
 	if (pad == CAMIF_SD_PAD_SINK) {
 		v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,

@@ -187,7 +187,7 @@
    USB 2.0 spec says bulk packet size is always 512 bytes
  */
 #define EM28XX_BULK_PACKET_MULTIPLIER 384
-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
+#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
 
 #define EM28XX_INTERLACED_DEFAULT 1
 

@@ -2698,6 +2698,8 @@ mptctl_hp_targetinfo(unsigned long arg)
 		       __FILE__, __LINE__, iocnum);
 		return -ENODEV;
 	}
+	if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+		return -EINVAL;
 	dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
 	    ioc->name));
 

@@ -32,6 +32,8 @@ struct sdhci_iproc_host {
 	const struct sdhci_iproc_data *data;
 	u32 shadow_cmd;
 	u32 shadow_blk;
+	bool is_cmd_shadowed;
+	bool is_blk_shadowed;
 };
 
 #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -47,8 +49,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
 
 static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
 {
-	u32 val = sdhci_iproc_readl(host, (reg & ~3));
-	u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
+	u32 val;
+	u16 word;
+
+	if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
+		/* Get the saved transfer mode */
+		val = iproc_host->shadow_cmd;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
+		/* Get the saved block info */
+		val = iproc_host->shadow_blk;
+	} else {
+		val = sdhci_iproc_readl(host, (reg & ~3));
+	}
+	word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
 	return word;
 }
 
@@ -104,13 +120,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
 
 	if (reg == SDHCI_COMMAND) {
 		/* Write the block now as we are issuing a command */
-		if (iproc_host->shadow_blk != 0) {
+		if (iproc_host->is_blk_shadowed) {
 			sdhci_iproc_writel(host, iproc_host->shadow_blk,
 				SDHCI_BLOCK_SIZE);
-			iproc_host->shadow_blk = 0;
+			iproc_host->is_blk_shadowed = false;
 		}
 		oldval = iproc_host->shadow_cmd;
-	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+		iproc_host->is_cmd_shadowed = false;
+	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
+		   iproc_host->is_blk_shadowed) {
 		/* Block size and count are stored in shadow reg */
 		oldval = iproc_host->shadow_blk;
 	} else {
@@ -122,9 +140,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
 	if (reg == SDHCI_TRANSFER_MODE) {
 		/* Save the transfer mode until the command is issued */
 		iproc_host->shadow_cmd = newval;
+		iproc_host->is_cmd_shadowed = true;
 	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
 		/* Save the block info until the command is issued */
 		iproc_host->shadow_blk = newval;
+		iproc_host->is_blk_shadowed = true;
 	} else {
 		/* Command or other regular 32-bit write */
 		sdhci_iproc_writel(host, newval, reg & ~3);

@@ -531,7 +531,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
 	int i;
 
 	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
-		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
+		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
 
 		slot = &ring->slots[i];
 		dev_kfree_skb(slot->skb);

@@ -2925,6 +2925,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
 	struct hwrm_vnic_tpa_cfg_input req = {0};
 
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+		return 0;
+
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
 
 	if (tpa_flags) {

@@ -1726,6 +1726,8 @@ static int enic_open(struct net_device *netdev)
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
+		/* enable rq before updating rq desc */
+		vnic_rq_enable(&enic->rq[i]);
 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1737,8 +1739,6 @@ static int enic_open(struct net_device *netdev)
 
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_enable(&enic->wq[i]);
-	for (i = 0; i < enic->rq_count; i++)
-		vnic_rq_enable(&enic->rq[i]);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
@@ -1765,8 +1765,12 @@ static int enic_open(struct net_device *netdev)
 	return 0;
 
 err_out_free_rq:
-	for (i = 0; i < enic->rq_count; i++)
+	for (i = 0; i < enic->rq_count; i++) {
+		err = vnic_rq_disable(&enic->rq[i]);
+		if (err)
+			return err;
 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+	}
 	enic_dev_notify_unset(enic);
 err_out_free_intr:
 	enic_unset_affinity_hint(enic);

@@ -3053,9 +3053,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
-	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, ndev);
-
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
@@ -3126,13 +3123,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			continue;
 		}
 
+		gfar_process_frame(ndev, skb);
+
 		/* Increment the number of packets */
 		total_pkts++;
 		total_bytes += skb->len;
 
 		skb_record_rx_queue(skb, rx_queue->qindex);
 
-		gfar_process_frame(ndev, skb);
+		skb->protocol = eth_type_trans(skb, ndev);
 
 		/* Send the packet up the stack */
 		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

@@ -1574,7 +1574,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 * we have already determined whether we have link or not.
 	 */
 	if (!mac->autoneg)
-		return -E1000_ERR_CONFIG;
+		return 1;
 
 	/* Auto-Neg is enabled.  Auto Speed Detection takes care
 	 * of MAC speed/duplex configuration.  So we only need to

Some files were not shown because too many files have changed in this diff.