Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

Conflicts:
in fs/proc/task_mmu.c:
vma_get_anon_name() (commit 586278d78b) wants to show a name for an
anonymous vma even when no name has been set on the vma. The name is
printed after any other names, so it may be covered by them; either
way, this only affects what is displayed.
This commit is contained in:
commit 5f87c475f8
442 changed files with 5375 additions and 2925 deletions
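For context, the show path that the conflict touches looks roughly like the sketch below. Only vma_get_anon_name() and the [anon:&lt;name&gt;] output format come from the patches referenced above; the surrounding function and the seq_file plumbing are assumed here for illustration and are not copied from the merge resolution.

#include <linux/mm.h>
#include <linux/seq_file.h>

/*
 * Hedged sketch, not the verbatim resolution: print the Android
 * anonymous-vma name last, after any file/heap/stack name, as the
 * conflict note above describes.  vma_get_anon_name() is the helper
 * from the referenced Android patch; the rest is illustrative only.
 */
static void show_anon_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char *name = vma_get_anon_name(vma);

	if (name)
		seq_printf(m, " [anon:%s]", name);
}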
@ -347,7 +347,7 @@ address perms offset dev inode pathname
a7cb1000-a7cb2000 ---p 00000000 00:00 0
a7cb2000-a7eb2000 rw-p 00000000 00:00 0
a7eb2000-a7eb3000 ---p 00000000 00:00 0
a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001]
a7eb3000-a7ed5000 rw-p 00000000 00:00 0
a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6

@ -379,7 +379,6 @@ is not associated with a file:
[heap] = the heap of the program
[stack] = the stack of the main process
[stack:1001] = the stack of the thread with tid 1001
[vdso] = the "virtual dynamic shared object",
the kernel system call handler
[anon:<name>] = an anonymous mapping that has been

@ -389,10 +388,8 @@ is not associated with a file:
The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
of the individual tasks of a process. In this file you will see a mapping marked
as [stack] if that task sees it as a stack. This is a key difference from the
content of /proc/PID/maps, where you will see all mappings that are being used
as stack by all of those tasks. Hence, for the example above, the task-level
map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
as [stack] if that task sees it as a stack. Hence, for the example above, the
task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:

08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
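As a quick way to see the per-task view that the documentation change above describes, a small userspace reader like the following can be used. The /proc paths come from the text above; the use of /proc/thread-self is an assumption for convenience (it is equivalent to /proc/PID/task/TID for the calling thread) and the program is illustrative only, not part of this commit.

#include <stdio.h>

/* Dump the calling thread's maps, i.e. the per-task view contrasted
 * with /proc/PID/maps in the documentation excerpt above. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/thread-self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "a7eb3000-a7ed5000 rw-p ... [stack]" */
	fclose(f);
	return 0;
}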
@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system. The
private key must be either destroyed or moved to a secure location and not kept
in the root node of the kernel source tree.

If you use the same private key to sign modules for multiple kernel
configurations, you must ensure that the module version information is
sufficient to prevent loading a module into a different kernel. Either
set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
2 Makefile
@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 18
SUBLEVEL = 21
EXTRAVERSION =
NAME = Blurry Fish Butt
@ -18,6 +18,20 @@ cflags-y += -fno-common -pipe -fno-builtin -D__linux__
|
|||
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
|
||||
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
|
||||
|
||||
is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
|
||||
|
||||
ifdef CONFIG_ISA_ARCOMPACT
|
||||
ifeq ($(is_700), 0)
|
||||
$(error Toolchain not configured for ARCompact builds)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_ISA_ARCV2
|
||||
ifeq ($(is_700), 1)
|
||||
$(error Toolchain not configured for ARCv2 builds)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_ARC_CURR_IN_REG
|
||||
# For a global register defintion, make sure it gets passed to every file
|
||||
# We had a customer reported bug where some code built in kernel was NOT using
|
||||
|
|
|
@ -374,12 +374,6 @@ static inline int is_isa_arcompact(void)
|
|||
return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
|
||||
#error "Toolchain not configured for ARCompact builds"
|
||||
#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
|
||||
#error "Toolchain not configured for ARCv2 builds"
|
||||
#endif
|
||||
|
||||
#endif /* __ASEMBLY__ */
|
||||
|
||||
#endif /* _ASM_ARC_ARCREGS_H */
|
||||
|
|
|
@ -142,7 +142,7 @@
|
|||
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
; Retrieve orig r25 and save it with rest of callee_regs
|
||||
ld.as r12, [r12, PT_user_r25]
|
||||
ld r12, [r12, PT_user_r25]
|
||||
PUSH r12
|
||||
#else
|
||||
PUSH r25
|
||||
|
@ -198,7 +198,7 @@
|
|||
|
||||
; SP is back to start of pt_regs
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
st.as r12, [sp, PT_user_r25]
|
||||
st r12, [sp, PT_user_r25]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
|
|
@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
|
|||
.endm
|
||||
|
||||
.macro IRQ_ENABLE scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
lr \scratch, [status32]
|
||||
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
|
||||
flag \scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
.endm
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -110,7 +110,7 @@
|
|||
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
|
||||
|
||||
/* Set of bits not changed in pte_modify */
|
||||
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
|
||||
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
|
||||
|
||||
/* More Abbrevaited helpers */
|
||||
#define PAGE_U_NONE __pgprot(___DEF)
|
||||
|
|
|
@ -914,6 +914,15 @@ void arc_cache_init(void)
|
|||
|
||||
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
|
||||
|
||||
/*
|
||||
* Only master CPU needs to execute rest of function:
|
||||
* - Assume SMP so all cores will have same cache config so
|
||||
* any geomtry checks will be same for all
|
||||
* - IOC setup / dma callbacks only need to be setup once
|
||||
*/
|
||||
if (cpu)
|
||||
return;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
|
||||
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
|
||||
|
||||
|
|
|
@ -84,6 +84,7 @@
|
|||
regulator-name = "emac-3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
startup-delay-us = <20000>;
|
||||
enable-active-high;
|
||||
gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
|
|
|
@ -66,6 +66,7 @@
|
|||
regulator-name = "emac-3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
startup-delay-us = <20000>;
|
||||
enable-active-high;
|
||||
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
|
|
|
@ -80,6 +80,7 @@
|
|||
regulator-name = "emac-3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
startup-delay-us = <20000>;
|
||||
enable-active-high;
|
||||
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
|
||||
};
|
||||
|
|
|
@ -79,6 +79,7 @@
|
|||
regulator-name = "emac-3v3";
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
startup-delay-us = <20000>;
|
||||
enable-active-high;
|
||||
gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
|
||||
};
|
||||
|
|
|
@ -429,6 +429,15 @@ config CAVIUM_ERRATUM_22375
|
|||
|
||||
If unsure, say Y.
|
||||
|
||||
config CAVIUM_ERRATUM_23144
|
||||
bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
|
||||
depends on NUMA
|
||||
default y
|
||||
help
|
||||
ITS SYNC command hang for cross node io and collections/cpu mapping.
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
config CAVIUM_ERRATUM_23154
|
||||
bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
|
||||
default y
|
||||
|
@ -439,6 +448,17 @@ config CAVIUM_ERRATUM_23154
|
|||
|
||||
If unsure, say Y.
|
||||
|
||||
config CAVIUM_ERRATUM_27456
|
||||
bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
|
||||
default y
|
||||
help
|
||||
On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
|
||||
instructions may cause the icache to become corrupted if it
|
||||
contains data for a non-current ASID. The fix is to
|
||||
invalidate the icache when changing the mm context.
|
||||
|
||||
If unsure, say Y.
|
||||
|
||||
endmenu
|
||||
|
||||
|
||||
|
|
|
@ -262,6 +262,8 @@
|
|||
#io-channel-cells = <1>;
|
||||
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
|
||||
clock-names = "saradc", "apb_pclk";
|
||||
resets = <&cru SRST_SARADC>;
|
||||
reset-names = "saradc-apb";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
|
@ -517,7 +519,7 @@
|
|||
#address-cells = <0>;
|
||||
|
||||
reg = <0x0 0xffb71000 0x0 0x1000>,
|
||||
<0x0 0xffb72000 0x0 0x1000>,
|
||||
<0x0 0xffb72000 0x0 0x2000>,
|
||||
<0x0 0xffb74000 0x0 0x2000>,
|
||||
<0x0 0xffb76000 0x0 0x2000>;
|
||||
interrupts = <GIC_PPI 9
|
||||
|
|
|
@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
|
|||
u64 irqstat;
|
||||
|
||||
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
|
||||
dsb(sy);
|
||||
return irqstat;
|
||||
}
|
||||
|
||||
|
|
|
@ -35,6 +35,8 @@
|
|||
#define ARM64_ALT_PAN_NOT_UAO 10
|
||||
|
||||
#define ARM64_NCAPS 11
|
||||
#define ARM64_WORKAROUND_CAVIUM_27456 12
|
||||
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
|
|||
|
||||
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
|
||||
|
||||
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
|
||||
#define ARCH_DLINFO \
|
||||
do { \
|
||||
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
|
||||
|
|
|
@ -107,8 +107,6 @@
|
|||
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
|
||||
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
|
||||
|
||||
#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
|
||||
|
||||
/* VTCR_EL2 Registers bits */
|
||||
#define VTCR_EL2_RES1 (1 << 31)
|
||||
#define VTCR_EL2_PS_MASK (7 << 16)
|
||||
|
|
|
@ -117,6 +117,8 @@ struct pt_regs {
|
|||
};
|
||||
u64 orig_x0;
|
||||
u64 syscallno;
|
||||
u64 orig_addr_limit;
|
||||
u64 unused; // maintain 16 byte alignment
|
||||
};
|
||||
|
||||
#define arch_has_single_step() (1)
|
||||
|
|
|
@ -19,4 +19,6 @@
|
|||
/* vDSO location */
|
||||
#define AT_SYSINFO_EHDR 33
|
||||
|
||||
#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -58,6 +58,7 @@ int main(void)
|
|||
DEFINE(S_PC, offsetof(struct pt_regs, pc));
|
||||
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
|
||||
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
|
||||
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
|
||||
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
|
||||
BLANK();
|
||||
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
|
||||
|
|
|
@ -87,6 +87,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
|||
.capability = ARM64_WORKAROUND_CAVIUM_23154,
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
{
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
.desc = "Cavium erratum 27456",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_27456,
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0x00,
|
||||
(1 << MIDR_VARIANT_SHIFT) | 1),
|
||||
},
|
||||
#endif
|
||||
{
|
||||
}
|
||||
|
|
|
@ -152,7 +152,6 @@ static int debug_monitors_init(void)
|
|||
/* Clear the OS lock. */
|
||||
on_each_cpu(clear_os_lock, NULL, 1);
|
||||
isb();
|
||||
local_dbg_enable();
|
||||
|
||||
/* Register hotplug handler. */
|
||||
__register_cpu_notifier(&os_lock_nb);
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <asm/errno.h>
|
||||
#include <asm/esr.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
|
@ -97,7 +98,13 @@
|
|||
mov x29, xzr // fp pointed to user-space
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
.endif
|
||||
get_thread_info tsk
|
||||
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
|
||||
ldr x20, [tsk, #TI_ADDR_LIMIT]
|
||||
str x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
mov x20, #TASK_SIZE_64
|
||||
str x20, [tsk, #TI_ADDR_LIMIT]
|
||||
.endif /* \el == 0 */
|
||||
mrs x22, elr_el1
|
||||
mrs x23, spsr_el1
|
||||
stp lr, x21, [sp, #S_LR]
|
||||
|
@ -128,6 +135,12 @@
|
|||
.endm
|
||||
|
||||
.macro kernel_exit, el
|
||||
.if \el != 0
|
||||
/* Restore the task's original addr_limit. */
|
||||
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
|
||||
str x20, [tsk, #TI_ADDR_LIMIT]
|
||||
.endif
|
||||
|
||||
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
|
||||
.if \el == 0
|
||||
ct_user_enter
|
||||
|
|
|
@ -187,7 +187,6 @@ asmlinkage void secondary_start_kernel(void)
|
|||
set_cpu_online(cpu, true);
|
||||
complete(&cpu_running);
|
||||
|
||||
local_dbg_enable();
|
||||
local_irq_enable();
|
||||
local_async_enable();
|
||||
|
||||
|
@ -333,8 +332,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
|||
|
||||
void __init smp_prepare_boot_cpu(void)
|
||||
{
|
||||
cpuinfo_store_boot_cpu();
|
||||
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
|
||||
cpuinfo_store_boot_cpu();
|
||||
}
|
||||
|
||||
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
|
||||
|
|
|
@ -64,7 +64,7 @@ __do_hyp_init:
|
|||
mrs x4, tcr_el1
|
||||
ldr x5, =TCR_EL2_MASK
|
||||
and x4, x4, x5
|
||||
ldr x5, =TCR_EL2_FLAGS
|
||||
mov x5, #TCR_EL2_RES1
|
||||
orr x4, x4, x5
|
||||
|
||||
#ifndef CONFIG_ARM64_VA_BITS_48
|
||||
|
@ -85,15 +85,18 @@ __do_hyp_init:
|
|||
ldr_l x5, idmap_t0sz
|
||||
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
|
||||
#endif
|
||||
msr tcr_el2, x4
|
||||
|
||||
ldr x4, =VTCR_EL2_FLAGS
|
||||
/*
|
||||
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
|
||||
* VTCR_EL2.
|
||||
* TCR_EL2 and VTCR_EL2.
|
||||
*/
|
||||
mrs x5, ID_AA64MMFR0_EL1
|
||||
bfi x4, x5, #16, #3
|
||||
|
||||
msr tcr_el2, x4
|
||||
|
||||
ldr x4, =VTCR_EL2_FLAGS
|
||||
bfi x4, x5, #16, #3
|
||||
|
||||
msr vtcr_el2, x4
|
||||
|
||||
mrs x4, mair_el1
|
||||
|
|
|
@ -748,9 +748,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
|
|||
/*
|
||||
* Check whether the physical FDT address is set and meets the minimum
|
||||
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
|
||||
* at least 8 bytes so that we can always access the size field of the
|
||||
* FDT header after mapping the first chunk, double check here if that
|
||||
* is indeed the case.
|
||||
* at least 8 bytes so that we can always access the magic and size
|
||||
* fields of the FDT header after mapping the first chunk, double check
|
||||
* here if that is indeed the case.
|
||||
*/
|
||||
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
|
||||
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
|
||||
|
@ -778,7 +778,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
|
|||
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
|
||||
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
|
||||
|
||||
if (fdt_check_header(dt_virt) != 0)
|
||||
if (fdt_magic(dt_virt) != FDT_MAGIC)
|
||||
return NULL;
|
||||
|
||||
*size = fdt_totalsize(dt_virt);
|
||||
|
|
|
@ -25,6 +25,8 @@
|
|||
#include <asm/hwcap.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#include "proc-macros.S"
|
||||
|
||||
|
@ -137,7 +139,17 @@ ENTRY(cpu_do_switch_mm)
|
|||
bfi x0, x1, #48, #16 // set the ASID
|
||||
msr ttbr0_el1, x0 // set TTBR0
|
||||
isb
|
||||
alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
|
||||
ret
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
alternative_else
|
||||
ic iallu
|
||||
dsb nsh
|
||||
isb
|
||||
ret
|
||||
alternative_endif
|
||||
ENDPROC(cpu_do_switch_mm)
|
||||
|
||||
.pushsection ".idmap.text", "ax"
|
||||
|
@ -182,6 +194,8 @@ ENTRY(__cpu_setup)
|
|||
msr cpacr_el1, x0 // Enable FP/ASIMD
|
||||
mov x0, #1 << 12 // Reset mdscr_el1 and disable
|
||||
msr mdscr_el1, x0 // access to the DCC from EL0
|
||||
isb // Unmask debug exceptions now,
|
||||
enable_dbg // since this is per-cpu
|
||||
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
|
||||
/*
|
||||
* Memory region attributes for LPAE:
|
||||
|
|
|
@ -61,7 +61,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
|
|||
" CMPT %0, #HI(0x02000000)\n" \
|
||||
" BNZ 1b\n" \
|
||||
: "=&d" (temp), "=&da" (result) \
|
||||
: "da" (&v->counter), "bd" (i) \
|
||||
: "da" (&v->counter), "br" (i) \
|
||||
: "cc"); \
|
||||
\
|
||||
smp_mb(); \
|
||||
|
|
|
@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
|
|||
" DCACHE [%2], %0\n"
|
||||
#endif
|
||||
"2:\n"
|
||||
: "=&d" (temp), "=&da" (retval)
|
||||
: "=&d" (temp), "=&d" (retval)
|
||||
: "da" (m), "bd" (old), "da" (new)
|
||||
: "cc"
|
||||
);
|
||||
|
|
|
@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
|
|||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static u64 notrace r4k_read_sched_clock(void)
|
||||
static u64 __maybe_unused notrace r4k_read_sched_clock(void)
|
||||
{
|
||||
return read_c0_count();
|
||||
}
|
||||
|
@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
|
|||
|
||||
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
|
||||
|
||||
#ifndef CONFIG_CPU_FREQ
|
||||
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1629,8 +1629,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
|
|||
|
||||
preempt_disable();
|
||||
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
|
||||
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
|
||||
kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
|
||||
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
|
||||
kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
|
||||
kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
|
||||
__func__, va, vcpu, read_c0_entryhi());
|
||||
er = EMULATE_FAIL;
|
||||
preempt_enable();
|
||||
goto done;
|
||||
}
|
||||
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
|
||||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
|
||||
int index;
|
||||
|
@ -1665,14 +1671,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
|
|||
run, vcpu);
|
||||
preempt_enable();
|
||||
goto dont_update_pc;
|
||||
} else {
|
||||
/*
|
||||
* We fault an entry from the guest tlb to the
|
||||
* shadow host TLB
|
||||
*/
|
||||
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
|
||||
NULL,
|
||||
NULL);
|
||||
}
|
||||
/*
|
||||
* We fault an entry from the guest tlb to the
|
||||
* shadow host TLB
|
||||
*/
|
||||
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
|
||||
NULL, NULL)) {
|
||||
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
|
||||
__func__, va, index, vcpu,
|
||||
read_c0_entryhi());
|
||||
er = EMULATE_FAIL;
|
||||
preempt_enable();
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -2633,8 +2644,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
|
|||
* OK we have a Guest TLB entry, now inject it into the
|
||||
* shadow host TLB
|
||||
*/
|
||||
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
|
||||
NULL);
|
||||
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
|
||||
NULL, NULL)) {
|
||||
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
|
||||
__func__, va, index, vcpu,
|
||||
read_c0_entryhi());
|
||||
er = EMULATE_FAIL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
|
|||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
|
||||
gfn, badvaddr);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
|
@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
|
|||
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
pfn_t pfn0, pfn1;
|
||||
gfn_t gfn0, gfn1;
|
||||
long tlb_lo[2];
|
||||
|
||||
if ((tlb->tlb_hi & VPN2_MASK) == 0) {
|
||||
pfn0 = 0;
|
||||
pfn1 = 0;
|
||||
} else {
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
tlb_lo[0] = tlb->tlb_lo0;
|
||||
tlb_lo[1] = tlb->tlb_lo1;
|
||||
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
/*
|
||||
* The commpage address must not be mapped to anything else if the guest
|
||||
* TLB contains entries nearby, or commpage accesses will break.
|
||||
*/
|
||||
if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
|
||||
VPN2_MASK & (PAGE_MASK << 1)))
|
||||
tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
|
||||
|
||||
pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
|
||||
>> PAGE_SHIFT];
|
||||
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
|
||||
>> PAGE_SHIFT];
|
||||
gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
|
||||
gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
|
||||
if (gfn0 >= kvm->arch.guest_pmap_npages ||
|
||||
gfn1 >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
|
||||
__func__, gfn0, gfn1, tlb->tlb_hi);
|
||||
kvm_mips_dump_guest_tlbs(vcpu);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (kvm_mips_map_page(kvm, gfn0) < 0)
|
||||
return -1;
|
||||
|
||||
if (kvm_mips_map_page(kvm, gfn1) < 0)
|
||||
return -1;
|
||||
|
||||
pfn0 = kvm->arch.guest_pmap[gfn0];
|
||||
pfn1 = kvm->arch.guest_pmap[gfn1];
|
||||
|
||||
if (hpa0)
|
||||
*hpa0 = pfn0 << PAGE_SHIFT;
|
||||
|
||||
|
@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
|
|||
kvm_mips_get_kernel_asid(vcpu) :
|
||||
kvm_mips_get_user_asid(vcpu));
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
|
||||
(tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
|
||||
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
|
||||
(tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
|
||||
|
||||
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
|
||||
tlb->tlb_lo0, tlb->tlb_lo1);
|
||||
|
@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
|
|||
local_irq_restore(flags);
|
||||
return KVM_INVALID_INST;
|
||||
}
|
||||
kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
|
||||
&vcpu->arch.
|
||||
guest_tlb[index],
|
||||
NULL, NULL);
|
||||
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
|
||||
&vcpu->arch.guest_tlb[index],
|
||||
NULL, NULL)) {
|
||||
kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
|
||||
__func__, opc, index, vcpu,
|
||||
read_c0_entryhi());
|
||||
kvm_mips_dump_guest_tlbs(vcpu);
|
||||
local_irq_restore(flags);
|
||||
return KVM_INVALID_INST;
|
||||
}
|
||||
inst = *(opc);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
|
|
|
@ -13,8 +13,8 @@
|
|||
#define SMBUS_PCI_REG64 0x64
|
||||
#define SMBUS_PCI_REGB4 0xb4
|
||||
|
||||
#define HPET_MIN_CYCLES 64
|
||||
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
|
||||
#define HPET_MIN_CYCLES 16
|
||||
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
|
||||
|
||||
static DEFINE_SPINLOCK(hpet_lock);
|
||||
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
|
||||
|
@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
|
|||
static int hpet_next_event(unsigned long delta,
|
||||
struct clock_event_device *evt)
|
||||
{
|
||||
unsigned int cnt;
|
||||
int res;
|
||||
u32 cnt;
|
||||
s32 res;
|
||||
|
||||
cnt = hpet_read(HPET_COUNTER);
|
||||
cnt += delta;
|
||||
cnt += (u32) delta;
|
||||
hpet_write(HPET_T0_CMP, cnt);
|
||||
|
||||
res = (int)(cnt - hpet_read(HPET_COUNTER));
|
||||
res = (s32)(cnt - hpet_read(HPET_COUNTER));
|
||||
|
||||
return res < HPET_MIN_CYCLES ? -ETIME : 0;
|
||||
}
|
||||
|
@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
|
|||
|
||||
cd = &per_cpu(hpet_clockevent_device, cpu);
|
||||
cd->name = "hpet";
|
||||
cd->rating = 320;
|
||||
cd->rating = 100;
|
||||
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
|
||||
cd->set_state_shutdown = hpet_set_state_shutdown;
|
||||
cd->set_state_periodic = hpet_set_state_periodic;
|
||||
|
|
|
@ -65,7 +65,7 @@ static struct insn insn_table[] = {
|
|||
#ifndef CONFIG_CPU_MIPSR6
|
||||
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
|
||||
#else
|
||||
{ insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
|
||||
{ insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
|
||||
#endif
|
||||
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
|
||||
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
|
||||
|
|
|
@ -97,10 +97,10 @@
|
|||
#define ENOTCONN 235 /* Transport endpoint is not connected */
|
||||
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
|
||||
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
|
||||
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
|
||||
#define ETIMEDOUT 238 /* Connection timed out */
|
||||
#define ECONNREFUSED 239 /* Connection refused */
|
||||
#define EREMOTERELEASE 240 /* Remote peer released connection */
|
||||
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
|
||||
#define EREMOTERELEASE 240 /* Remote peer released connection */
|
||||
#define EHOSTDOWN 241 /* Host is down */
|
||||
#define EHOSTUNREACH 242 /* No route to host */
|
||||
|
||||
|
|
|
@ -164,6 +164,7 @@ struct coprocessor_request_block {
|
|||
#define ICSWX_INITIATED (0x8)
|
||||
#define ICSWX_BUSY (0x4)
|
||||
#define ICSWX_REJECTED (0x2)
|
||||
#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */
|
||||
|
||||
static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
|
||||
{
|
||||
|
|
|
@ -677,7 +677,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
|
|||
/* Check if the request is finished successfully */
|
||||
if (active_flag) {
|
||||
rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
|
||||
if (rc <= 0)
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
if (rc & active_flag)
|
||||
|
|
|
@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
|
|||
std r3, STK_PARAM(R3)(r1)
|
||||
SAVE_NVGPRS(r1)
|
||||
|
||||
/* We need to setup MSR for VSX register save instructions. Here we
|
||||
* also clear the MSR RI since when we do the treclaim, we won't have a
|
||||
* valid kernel pointer for a while. We clear RI here as it avoids
|
||||
* adding another mtmsr closer to the treclaim. This makes the region
|
||||
* maked as non-recoverable wider than it needs to be but it saves on
|
||||
* inserting another mtmsrd later.
|
||||
*/
|
||||
/* We need to setup MSR for VSX register save instructions. */
|
||||
mfmsr r14
|
||||
mr r15, r14
|
||||
ori r15, r15, MSR_FP
|
||||
li r16, MSR_RI
|
||||
li r16, 0
|
||||
ori r16, r16, MSR_EE /* IRQs hard off */
|
||||
andc r15, r15, r16
|
||||
oris r15, r15, MSR_VEC@h
|
||||
|
@ -176,7 +170,17 @@ dont_backup_fp:
|
|||
1: tdeqi r6, 0
|
||||
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
|
||||
|
||||
/* The moment we treclaim, ALL of our GPRs will switch
|
||||
/* Clear MSR RI since we are about to change r1, EE is already off. */
|
||||
li r4, 0
|
||||
mtmsrd r4, 1
|
||||
|
||||
/*
|
||||
* BE CAREFUL HERE:
|
||||
* At this point we can't take an SLB miss since we have MSR_RI
|
||||
* off. Load only to/from the stack/paca which are in SLB bolted regions
|
||||
* until we turn MSR RI back on.
|
||||
*
|
||||
* The moment we treclaim, ALL of our GPRs will switch
|
||||
* to user register state. (FPRs, CCR etc. also!)
|
||||
* Use an sprg and a tm_scratch in the PACA to shuffle.
|
||||
*/
|
||||
|
@ -197,6 +201,11 @@ dont_backup_fp:
|
|||
|
||||
/* Store the PPR in r11 and reset to decent value */
|
||||
std r11, GPR11(r1) /* Temporary stash */
|
||||
|
||||
/* Reset MSR RI so we can take SLB faults again */
|
||||
li r11, MSR_RI
|
||||
mtmsrd r11, 1
|
||||
|
||||
mfspr r11, SPRN_PPR
|
||||
HMT_MEDIUM
|
||||
|
||||
|
@ -397,11 +406,6 @@ restore_gprs:
|
|||
ld r5, THREAD_TM_DSCR(r3)
|
||||
ld r6, THREAD_TM_PPR(r3)
|
||||
|
||||
/* Clear the MSR RI since we are about to change R1. EE is already off
|
||||
*/
|
||||
li r4, 0
|
||||
mtmsrd r4, 1
|
||||
|
||||
REST_GPR(0, r7) /* GPR0 */
|
||||
REST_2GPRS(2, r7) /* GPR2-3 */
|
||||
REST_GPR(4, r7) /* GPR4 */
|
||||
|
@ -439,10 +443,33 @@ restore_gprs:
|
|||
ld r6, _CCR(r7)
|
||||
mtcr r6
|
||||
|
||||
REST_GPR(1, r7) /* GPR1 */
|
||||
REST_GPR(5, r7) /* GPR5-7 */
|
||||
REST_GPR(6, r7)
|
||||
ld r7, GPR7(r7)
|
||||
|
||||
/*
|
||||
* Store r1 and r5 on the stack so that we can access them
|
||||
* after we clear MSR RI.
|
||||
*/
|
||||
|
||||
REST_GPR(5, r7)
|
||||
std r5, -8(r1)
|
||||
ld r5, GPR1(r7)
|
||||
std r5, -16(r1)
|
||||
|
||||
REST_GPR(7, r7)
|
||||
|
||||
/* Clear MSR RI since we are about to change r1. EE is already off */
|
||||
li r5, 0
|
||||
mtmsrd r5, 1
|
||||
|
||||
/*
|
||||
* BE CAREFUL HERE:
|
||||
* At this point we can't take an SLB miss since we have MSR_RI
|
||||
* off. Load only to/from the stack/paca which are in SLB bolted regions
|
||||
* until we turn MSR RI back on.
|
||||
*/
|
||||
|
||||
ld r5, -8(r1)
|
||||
ld r1, -16(r1)
|
||||
|
||||
/* Commit register state as checkpointed state: */
|
||||
TRECHKPT
|
||||
|
|
|
@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
|||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
b skip_tm
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
|
||||
|
||||
/* Turn on TM/FP/VSX/VMX so we can restore them. */
|
||||
mfmsr r5
|
||||
li r6, MSR_TM >> 32
|
||||
sldi r6, r6, 32
|
||||
or r5, r5, r6
|
||||
ori r5, r5, MSR_FP
|
||||
oris r5, r5, (MSR_VEC | MSR_VSX)@h
|
||||
mtmsrd r5
|
||||
|
||||
/*
|
||||
* The user may change these outside of a transaction, so they must
|
||||
* always be context switched.
|
||||
*/
|
||||
ld r5, VCPU_TFHAR(r4)
|
||||
ld r6, VCPU_TFIAR(r4)
|
||||
ld r7, VCPU_TEXASR(r4)
|
||||
mtspr SPRN_TFHAR, r5
|
||||
mtspr SPRN_TFIAR, r6
|
||||
mtspr SPRN_TEXASR, r7
|
||||
|
||||
ld r5, VCPU_MSR(r4)
|
||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
||||
beq skip_tm /* TM not active in guest */
|
||||
|
||||
/* Make sure the failure summary is set, otherwise we'll program check
|
||||
* when we trechkpt. It's possible that this might have been not set
|
||||
* on a kvmppc_set_one_reg() call but we shouldn't let this crash the
|
||||
* host.
|
||||
*/
|
||||
oris r7, r7, (TEXASR_FS)@h
|
||||
mtspr SPRN_TEXASR, r7
|
||||
|
||||
/*
|
||||
* We need to load up the checkpointed state for the guest.
|
||||
* We need to do this early as it will blow away any GPRs, VSRs and
|
||||
* some SPRs.
|
||||
*/
|
||||
|
||||
mr r31, r4
|
||||
addi r3, r31, VCPU_FPRS_TM
|
||||
bl load_fp_state
|
||||
addi r3, r31, VCPU_VRS_TM
|
||||
bl load_vr_state
|
||||
mr r4, r31
|
||||
lwz r7, VCPU_VRSAVE_TM(r4)
|
||||
mtspr SPRN_VRSAVE, r7
|
||||
|
||||
ld r5, VCPU_LR_TM(r4)
|
||||
lwz r6, VCPU_CR_TM(r4)
|
||||
ld r7, VCPU_CTR_TM(r4)
|
||||
ld r8, VCPU_AMR_TM(r4)
|
||||
ld r9, VCPU_TAR_TM(r4)
|
||||
mtlr r5
|
||||
mtcr r6
|
||||
mtctr r7
|
||||
mtspr SPRN_AMR, r8
|
||||
mtspr SPRN_TAR, r9
|
||||
|
||||
/*
|
||||
* Load up PPR and DSCR values but don't put them in the actual SPRs
|
||||
* till the last moment to avoid running with userspace PPR and DSCR for
|
||||
* too long.
|
||||
*/
|
||||
ld r29, VCPU_DSCR_TM(r4)
|
||||
ld r30, VCPU_PPR_TM(r4)
|
||||
|
||||
std r2, PACATMSCRATCH(r13) /* Save TOC */
|
||||
|
||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
||||
li r5, 0
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* Load GPRs r0-r28 */
|
||||
reg = 0
|
||||
.rept 29
|
||||
ld reg, VCPU_GPRS_TM(reg)(r31)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
mtspr SPRN_DSCR, r29
|
||||
mtspr SPRN_PPR, r30
|
||||
|
||||
/* Load final GPRs */
|
||||
ld 29, VCPU_GPRS_TM(29)(r31)
|
||||
ld 30, VCPU_GPRS_TM(30)(r31)
|
||||
ld 31, VCPU_GPRS_TM(31)(r31)
|
||||
|
||||
/* TM checkpointed state is now setup. All GPRs are now volatile. */
|
||||
TRECHKPT
|
||||
|
||||
/* Now let's get back the state we need. */
|
||||
HMT_MEDIUM
|
||||
GET_PACA(r13)
|
||||
ld r29, HSTATE_DSCR(r13)
|
||||
mtspr SPRN_DSCR, r29
|
||||
ld r4, HSTATE_KVM_VCPU(r13)
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
ld r2, PACATMSCRATCH(r13)
|
||||
|
||||
/* Set the MSR RI since we have our registers back. */
|
||||
li r5, MSR_RI
|
||||
mtmsrd r5, 1
|
||||
skip_tm:
|
||||
bl kvmppc_restore_tm
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
|
||||
/* Load guest PMU registers */
|
||||
|
@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
|
|||
/* Skip next section on POWER7 */
|
||||
b 8f
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
||||
/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
|
||||
mfmsr r8
|
||||
li r0, 1
|
||||
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
|
||||
mtmsrd r8
|
||||
|
||||
/* Load up POWER8-specific registers */
|
||||
ld r5, VCPU_IAMR(r4)
|
||||
lwz r6, VCPU_PSPB(r4)
|
||||
|
@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
|
|||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
b 2f
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
|
||||
/* Turn on TM. */
|
||||
mfmsr r8
|
||||
li r0, 1
|
||||
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
|
||||
mtmsrd r8
|
||||
|
||||
ld r5, VCPU_MSR(r9)
|
||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
||||
beq 1f /* TM not active in guest. */
|
||||
|
||||
li r3, TM_CAUSE_KVM_RESCHED
|
||||
|
||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
||||
li r5, 0
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* All GPRs are volatile at this point. */
|
||||
TRECLAIM(R3)
|
||||
|
||||
/* Temporarily store r13 and r9 so we have some regs to play with */
|
||||
SET_SCRATCH0(r13)
|
||||
GET_PACA(r13)
|
||||
std r9, PACATMSCRATCH(r13)
|
||||
ld r9, HSTATE_KVM_VCPU(r13)
|
||||
|
||||
/* Get a few more GPRs free. */
|
||||
std r29, VCPU_GPRS_TM(29)(r9)
|
||||
std r30, VCPU_GPRS_TM(30)(r9)
|
||||
std r31, VCPU_GPRS_TM(31)(r9)
|
||||
|
||||
/* Save away PPR and DSCR soon so don't run with user values. */
|
||||
mfspr r31, SPRN_PPR
|
||||
HMT_MEDIUM
|
||||
mfspr r30, SPRN_DSCR
|
||||
ld r29, HSTATE_DSCR(r13)
|
||||
mtspr SPRN_DSCR, r29
|
||||
|
||||
/* Save all but r9, r13 & r29-r31 */
|
||||
reg = 0
|
||||
.rept 29
|
||||
.if (reg != 9) && (reg != 13)
|
||||
std reg, VCPU_GPRS_TM(reg)(r9)
|
||||
.endif
|
||||
reg = reg + 1
|
||||
.endr
|
||||
/* ... now save r13 */
|
||||
GET_SCRATCH0(r4)
|
||||
std r4, VCPU_GPRS_TM(13)(r9)
|
||||
/* ... and save r9 */
|
||||
ld r4, PACATMSCRATCH(r13)
|
||||
std r4, VCPU_GPRS_TM(9)(r9)
|
||||
|
||||
/* Reload stack pointer and TOC. */
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
ld r2, PACATOC(r13)
|
||||
|
||||
/* Set MSR RI now we have r1 and r13 back. */
|
||||
li r5, MSR_RI
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* Save away checkpinted SPRs. */
|
||||
std r31, VCPU_PPR_TM(r9)
|
||||
std r30, VCPU_DSCR_TM(r9)
|
||||
mflr r5
|
||||
mfcr r6
|
||||
mfctr r7
|
||||
mfspr r8, SPRN_AMR
|
||||
mfspr r10, SPRN_TAR
|
||||
std r5, VCPU_LR_TM(r9)
|
||||
stw r6, VCPU_CR_TM(r9)
|
||||
std r7, VCPU_CTR_TM(r9)
|
||||
std r8, VCPU_AMR_TM(r9)
|
||||
std r10, VCPU_TAR_TM(r9)
|
||||
|
||||
/* Restore r12 as trap number. */
|
||||
lwz r12, VCPU_TRAP(r9)
|
||||
|
||||
/* Save FP/VSX. */
|
||||
addi r3, r9, VCPU_FPRS_TM
|
||||
bl store_fp_state
|
||||
addi r3, r9, VCPU_VRS_TM
|
||||
bl store_vr_state
|
||||
mfspr r6, SPRN_VRSAVE
|
||||
stw r6, VCPU_VRSAVE_TM(r9)
|
||||
1:
|
||||
/*
|
||||
* We need to save these SPRs after the treclaim so that the software
|
||||
* error code is recorded correctly in the TEXASR. Also the user may
|
||||
* change these outside of a transaction, so they must always be
|
||||
* context switched.
|
||||
*/
|
||||
mfspr r5, SPRN_TFHAR
|
||||
mfspr r6, SPRN_TFIAR
|
||||
mfspr r7, SPRN_TEXASR
|
||||
std r5, VCPU_TFHAR(r9)
|
||||
std r6, VCPU_TFIAR(r9)
|
||||
std r7, VCPU_TEXASR(r9)
|
||||
2:
|
||||
bl kvmppc_save_tm
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
|
||||
/* Increment yield count if they have a VPA */
|
||||
|
@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
|
|||
/* save FP state */
|
||||
bl kvmppc_save_fp
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
ld r9, HSTATE_KVM_VCPU(r13)
|
||||
bl kvmppc_save_tm
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set DEC to the smaller of DEC and HDEC, so that we wake
|
||||
* no later than the end of our timeslice (HDEC interrupts
|
||||
|
@ -2321,6 +2120,12 @@ kvm_end_cede:
|
|||
bl kvmhv_accumulate_time
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
BEGIN_FTR_SECTION
|
||||
bl kvmppc_restore_tm
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_TM)
|
||||
#endif
|
||||
|
||||
/* load up FP state */
|
||||
bl kvmppc_load_fp
|
||||
|
||||
|
@ -2629,6 +2434,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
|||
mr r4,r31
|
||||
blr
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/*
|
||||
* Save transactional state and TM-related registers.
|
||||
* Called with r9 pointing to the vcpu struct.
|
||||
* This can modify all checkpointed registers, but
|
||||
* restores r1, r2 and r9 (vcpu pointer) before exit.
|
||||
*/
|
||||
kvmppc_save_tm:
|
||||
mflr r0
|
||||
std r0, PPC_LR_STKOFF(r1)
|
||||
|
||||
/* Turn on TM. */
|
||||
mfmsr r8
|
||||
li r0, 1
|
||||
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
|
||||
mtmsrd r8
|
||||
|
||||
ld r5, VCPU_MSR(r9)
|
||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
||||
beq 1f /* TM not active in guest. */
|
||||
|
||||
std r1, HSTATE_HOST_R1(r13)
|
||||
li r3, TM_CAUSE_KVM_RESCHED
|
||||
|
||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
||||
li r5, 0
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* All GPRs are volatile at this point. */
|
||||
TRECLAIM(R3)
|
||||
|
||||
/* Temporarily store r13 and r9 so we have some regs to play with */
|
||||
SET_SCRATCH0(r13)
|
||||
GET_PACA(r13)
|
||||
std r9, PACATMSCRATCH(r13)
|
||||
ld r9, HSTATE_KVM_VCPU(r13)
|
||||
|
||||
/* Get a few more GPRs free. */
|
||||
std r29, VCPU_GPRS_TM(29)(r9)
|
||||
std r30, VCPU_GPRS_TM(30)(r9)
|
||||
std r31, VCPU_GPRS_TM(31)(r9)
|
||||
|
||||
/* Save away PPR and DSCR soon so don't run with user values. */
|
||||
mfspr r31, SPRN_PPR
|
||||
HMT_MEDIUM
|
||||
mfspr r30, SPRN_DSCR
|
||||
ld r29, HSTATE_DSCR(r13)
|
||||
mtspr SPRN_DSCR, r29
|
||||
|
||||
/* Save all but r9, r13 & r29-r31 */
|
||||
reg = 0
|
||||
.rept 29
|
||||
.if (reg != 9) && (reg != 13)
|
||||
std reg, VCPU_GPRS_TM(reg)(r9)
|
||||
.endif
|
||||
reg = reg + 1
|
||||
.endr
|
||||
/* ... now save r13 */
|
||||
GET_SCRATCH0(r4)
|
||||
std r4, VCPU_GPRS_TM(13)(r9)
|
||||
/* ... and save r9 */
|
||||
ld r4, PACATMSCRATCH(r13)
|
||||
std r4, VCPU_GPRS_TM(9)(r9)
|
||||
|
||||
/* Reload stack pointer and TOC. */
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
ld r2, PACATOC(r13)
|
||||
|
||||
/* Set MSR RI now we have r1 and r13 back. */
|
||||
li r5, MSR_RI
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* Save away checkpinted SPRs. */
|
||||
std r31, VCPU_PPR_TM(r9)
|
||||
std r30, VCPU_DSCR_TM(r9)
|
||||
mflr r5
|
||||
mfcr r6
|
||||
mfctr r7
|
||||
mfspr r8, SPRN_AMR
|
||||
mfspr r10, SPRN_TAR
|
||||
std r5, VCPU_LR_TM(r9)
|
||||
stw r6, VCPU_CR_TM(r9)
|
||||
std r7, VCPU_CTR_TM(r9)
|
||||
std r8, VCPU_AMR_TM(r9)
|
||||
std r10, VCPU_TAR_TM(r9)
|
||||
|
||||
/* Restore r12 as trap number. */
|
||||
lwz r12, VCPU_TRAP(r9)
|
||||
|
||||
/* Save FP/VSX. */
|
||||
addi r3, r9, VCPU_FPRS_TM
|
||||
bl store_fp_state
|
||||
addi r3, r9, VCPU_VRS_TM
|
||||
bl store_vr_state
|
||||
mfspr r6, SPRN_VRSAVE
|
||||
stw r6, VCPU_VRSAVE_TM(r9)
|
||||
1:
|
||||
/*
|
||||
* We need to save these SPRs after the treclaim so that the software
|
||||
* error code is recorded correctly in the TEXASR. Also the user may
|
||||
* change these outside of a transaction, so they must always be
|
||||
* context switched.
|
||||
*/
|
||||
mfspr r5, SPRN_TFHAR
|
||||
mfspr r6, SPRN_TFIAR
|
||||
mfspr r7, SPRN_TEXASR
|
||||
std r5, VCPU_TFHAR(r9)
|
||||
std r6, VCPU_TFIAR(r9)
|
||||
std r7, VCPU_TEXASR(r9)
|
||||
|
||||
ld r0, PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
/*
|
||||
* Restore transactional state and TM-related registers.
|
||||
* Called with r4 pointing to the vcpu struct.
|
||||
* This potentially modifies all checkpointed registers.
|
||||
* It restores r1, r2, r4 from the PACA.
|
||||
*/
|
||||
kvmppc_restore_tm:
|
||||
mflr r0
|
||||
std r0, PPC_LR_STKOFF(r1)
|
||||
|
||||
/* Turn on TM/FP/VSX/VMX so we can restore them. */
|
||||
mfmsr r5
|
||||
li r6, MSR_TM >> 32
|
||||
sldi r6, r6, 32
|
||||
or r5, r5, r6
|
||||
ori r5, r5, MSR_FP
|
||||
oris r5, r5, (MSR_VEC | MSR_VSX)@h
|
||||
mtmsrd r5
|
||||
|
||||
/*
|
||||
* The user may change these outside of a transaction, so they must
|
||||
* always be context switched.
|
||||
*/
|
||||
ld r5, VCPU_TFHAR(r4)
|
||||
ld r6, VCPU_TFIAR(r4)
|
||||
ld r7, VCPU_TEXASR(r4)
|
||||
mtspr SPRN_TFHAR, r5
|
||||
mtspr SPRN_TFIAR, r6
|
||||
mtspr SPRN_TEXASR, r7
|
||||
|
||||
ld r5, VCPU_MSR(r4)
|
||||
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
|
||||
beqlr /* TM not active in guest */
|
||||
std r1, HSTATE_HOST_R1(r13)
|
||||
|
||||
/* Make sure the failure summary is set, otherwise we'll program check
|
||||
* when we trechkpt. It's possible that this might have been not set
|
||||
* on a kvmppc_set_one_reg() call but we shouldn't let this crash the
|
||||
* host.
|
||||
*/
|
||||
oris r7, r7, (TEXASR_FS)@h
|
||||
mtspr SPRN_TEXASR, r7
|
||||
|
||||
/*
|
||||
* We need to load up the checkpointed state for the guest.
|
||||
* We need to do this early as it will blow away any GPRs, VSRs and
|
||||
* some SPRs.
|
||||
*/
|
||||
|
||||
mr r31, r4
|
||||
addi r3, r31, VCPU_FPRS_TM
|
||||
bl load_fp_state
|
||||
addi r3, r31, VCPU_VRS_TM
|
||||
bl load_vr_state
|
||||
mr r4, r31
|
||||
lwz r7, VCPU_VRSAVE_TM(r4)
|
||||
mtspr SPRN_VRSAVE, r7
|
||||
|
||||
ld r5, VCPU_LR_TM(r4)
|
||||
lwz r6, VCPU_CR_TM(r4)
|
||||
ld r7, VCPU_CTR_TM(r4)
|
||||
ld r8, VCPU_AMR_TM(r4)
|
||||
ld r9, VCPU_TAR_TM(r4)
|
||||
mtlr r5
|
||||
mtcr r6
|
||||
mtctr r7
|
||||
mtspr SPRN_AMR, r8
|
||||
mtspr SPRN_TAR, r9
|
||||
|
||||
/*
|
||||
* Load up PPR and DSCR values but don't put them in the actual SPRs
|
||||
* till the last moment to avoid running with userspace PPR and DSCR for
|
||||
* too long.
|
||||
*/
|
||||
ld r29, VCPU_DSCR_TM(r4)
|
||||
ld r30, VCPU_PPR_TM(r4)
|
||||
|
||||
std r2, PACATMSCRATCH(r13) /* Save TOC */
|
||||
|
||||
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
|
||||
li r5, 0
|
||||
mtmsrd r5, 1
|
||||
|
||||
/* Load GPRs r0-r28 */
|
||||
reg = 0
|
||||
.rept 29
|
||||
ld reg, VCPU_GPRS_TM(reg)(r31)
|
||||
reg = reg + 1
|
||||
.endr
|
||||
|
||||
mtspr SPRN_DSCR, r29
|
||||
mtspr SPRN_PPR, r30
|
||||
|
||||
/* Load final GPRs */
|
||||
ld 29, VCPU_GPRS_TM(29)(r31)
|
||||
ld 30, VCPU_GPRS_TM(30)(r31)
|
||||
ld 31, VCPU_GPRS_TM(31)(r31)
|
||||
|
||||
/* TM checkpointed state is now setup. All GPRs are now volatile. */
|
||||
TRECHKPT
|
||||
|
||||
/* Now let's get back the state we need. */
|
||||
HMT_MEDIUM
|
||||
GET_PACA(r13)
|
||||
ld r29, HSTATE_DSCR(r13)
|
||||
mtspr SPRN_DSCR, r29
|
||||
ld r4, HSTATE_KVM_VCPU(r13)
|
||||
ld r1, HSTATE_HOST_R1(r13)
|
||||
ld r2, PACATMSCRATCH(r13)
|
||||
|
||||
/* Set the MSR RI since we have our registers back. */
|
||||
li r5, MSR_RI
|
||||
mtmsrd r5, 1
|
||||
|
||||
ld r0, PPC_LR_STKOFF(r1)
|
||||
mtlr r0
|
||||
blr
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We come here if we get any exception or interrupt while we are
|
||||
* executing host real mode code while in guest MMU context.
|
||||
|
|
|
@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
|
|||
static struct miscdevice prng_sha512_dev = {
|
||||
.name = "prandom",
|
||||
.minor = MISC_DYNAMIC_MINOR,
|
||||
.mode = 0644,
|
||||
.fops = &prng_sha512_fops,
|
||||
};
|
||||
static struct miscdevice prng_tdes_dev = {
|
||||
.name = "prandom",
|
||||
.minor = MISC_DYNAMIC_MINOR,
|
||||
.mode = 0644,
|
||||
.fops = &prng_tdes_fops,
|
||||
};
|
||||
|
||||
|
|
|
@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
|
|||
#define ZPCI_IOTA_FS_2G 2
|
||||
#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
|
||||
|
||||
#define ZPCI_TABLE_SIZE_RT (1UL << 42)
|
||||
|
||||
#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
|
||||
#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
|
||||
#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
|
||||
|
|
|
@ -701,8 +701,7 @@ static int zpci_restore(struct device *dev)
|
|||
goto out;
|
||||
|
||||
zpci_map_resources(pdev);
|
||||
zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
|
||||
zdev->start_dma + zdev->iommu_size - 1,
|
||||
zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
|
||||
(u64) zdev->dma_table);
|
||||
|
||||
out:
|
||||
|
|
|
@ -458,7 +458,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
|
|||
goto out_clean;
|
||||
}
|
||||
|
||||
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
|
||||
/*
|
||||
* Restrict the iommu bitmap size to the minimum of the following:
|
||||
* - main memory size
|
||||
* - 3-level pagetable address limit minus start_dma offset
|
||||
* - DMA address range allowed by the hardware (clp query pci fn)
|
||||
*
|
||||
* Also set zdev->end_dma to the actual end address of the usable
|
||||
* range, instead of the theoretical maximum as reported by hardware.
|
||||
*/
|
||||
zdev->iommu_size = min3((u64) high_memory,
|
||||
ZPCI_TABLE_SIZE_RT - zdev->start_dma,
|
||||
zdev->end_dma - zdev->start_dma + 1);
|
||||
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
|
||||
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
|
||||
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
|
||||
if (!zdev->iommu_bitmap) {
|
||||
|
@ -466,10 +478,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
|
|||
goto out_reg;
|
||||
}
|
||||
|
||||
rc = zpci_register_ioat(zdev,
|
||||
0,
|
||||
zdev->start_dma + PAGE_OFFSET,
|
||||
zdev->start_dma + zdev->iommu_size - 1,
|
||||
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
|
||||
(u64) zdev->dma_table);
|
||||
if (rc)
|
||||
goto out_reg;
|
||||
|
|
|
@ -81,7 +81,7 @@
|
|||
.altinstr_replacement : { *(.altinstr_replacement) }
|
||||
/* .exit.text is discard at runtime, not link time, to deal with references
|
||||
from .altinstructions and .eh_frame */
|
||||
.exit.text : { *(.exit.text) }
|
||||
.exit.text : { EXIT_TEXT }
|
||||
.exit.data : { *(.exit.data) }
|
||||
|
||||
.preinit_array : {
|
||||
|
|
|
@ -86,7 +86,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
|
|||
|
||||
static inline void __native_flush_tlb(void)
|
||||
{
|
||||
/*
|
||||
* If current->mm == NULL then we borrow a mm which may change during a
|
||||
* task switch and therefore we must not be preempted while we write CR3
|
||||
* back:
|
||||
*/
|
||||
preempt_disable();
|
||||
native_write_cr3(native_read_cr3());
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static inline void __native_flush_tlb_global_irq_disabled(void)
|
||||
|
|
|
@ -1587,6 +1587,9 @@ void __init enable_IR_x2apic(void)
|
|||
unsigned long flags;
|
||||
int ret, ir_stat;
|
||||
|
||||
if (skip_ioapic_setup)
|
||||
return;
|
||||
|
||||
ir_stat = irq_remapping_prepare();
|
||||
if (ir_stat < 0 && !x2apic_supported())
|
||||
return;
|
||||
|
|
|
@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
|
|||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static unsigned char hv_get_nmi_reason(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init ms_hyperv_init_platform(void)
|
||||
{
|
||||
/*
|
||||
|
@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
|
|||
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
|
||||
#endif
|
||||
mark_tsc_unstable("running on Hyper-V");
|
||||
|
||||
/*
|
||||
* Generation 2 instances don't support reading the NMI status from
|
||||
* 0x61 port.
|
||||
*/
|
||||
if (efi_enabled(EFI_BOOT))
|
||||
x86_platform.get_nmi_reason = hv_get_nmi_reason;
|
||||
}
|
||||
|
||||
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
|
||||
|
|
|
@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
|
|||
list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
|
||||
}
|
||||
|
||||
static void cqm_cleanup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!cqm_rmid_ptrs)
|
||||
return;
|
||||
|
||||
for (i = 0; i < cqm_max_rmid; i++)
|
||||
kfree(cqm_rmid_ptrs[i]);
|
||||
|
||||
kfree(cqm_rmid_ptrs);
|
||||
cqm_rmid_ptrs = NULL;
|
||||
}
|
||||
|
||||
static int intel_cqm_setup_rmid_cache(void)
|
||||
{
|
||||
struct cqm_rmid_entry *entry;
|
||||
|
@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(void)
|
|||
int r = 0;
|
||||
|
||||
nr_rmids = cqm_max_rmid + 1;
|
||||
cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
|
||||
cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
|
||||
nr_rmids, GFP_KERNEL);
|
||||
if (!cqm_rmid_ptrs)
|
||||
return -ENOMEM;
|
||||
|
@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(void)
|
|||
mutex_unlock(&cache_mutex);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
while (r--)
|
||||
kfree(cqm_rmid_ptrs[r]);
|
||||
|
||||
kfree(cqm_rmid_ptrs);
|
||||
fail:
|
||||
cqm_cleanup();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -281,9 +293,13 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
|
|||
|
||||
/*
|
||||
* Events that target same task are placed into the same cache group.
|
||||
* Mark it as a multi event group, so that we update ->count
|
||||
* for every event rather than just the group leader later.
|
||||
*/
|
||||
if (a->hw.target == b->hw.target)
|
||||
if (a->hw.target == b->hw.target) {
|
||||
b->hw.is_group_event = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Are we an inherited event?
|
||||
|
@ -849,6 +865,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
|
|||
bool conflict = false;
|
||||
u32 rmid;
|
||||
|
||||
event->hw.is_group_event = false;
|
||||
list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
|
||||
rmid = iter->hw.cqm_rmid;
|
||||
|
||||
|
@ -940,7 +957,9 @@ static u64 intel_cqm_event_count(struct perf_event *event)
|
|||
return __perf_event_count(event);
|
||||
|
||||
/*
|
||||
* Only the group leader gets to report values. This stops us
|
||||
* Only the group leader gets to report values except in case of
|
||||
* multiple events in the same group, we still need to read the
|
||||
* other events.This stops us
|
||||
* reporting duplicate values to userspace, and gives us a clear
|
||||
* rule for which task gets to report the values.
|
||||
*
|
||||
|
@ -948,7 +967,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
|
|||
* specific packages - we forfeit that ability when we create
|
||||
* task events.
|
||||
*/
|
||||
if (!cqm_group_leader(event))
|
||||
if (!cqm_group_leader(event) && !event->hw.is_group_event)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@ -1315,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm_match[] = {
|
|||
|
||||
static int __init intel_cqm_init(void)
|
||||
{
|
||||
char *str, scale[20];
|
||||
char *str = NULL, scale[20];
|
||||
int i, cpu, ret;
|
||||
|
||||
if (!x86_match_cpu(intel_cqm_match))
|
||||
|
@ -1375,16 +1394,25 @@ static int __init intel_cqm_init(void)
|
|||
cqm_pick_event_reader(i);
|
||||
}
|
||||
|
||||
__perf_cpu_notifier(intel_cqm_cpu_notifier);
|
||||
|
||||
ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
pr_err("Intel CQM perf registration failed: %d\n", ret);
|
||||
else
|
||||
pr_info("Intel CQM monitoring enabled\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
pr_info("Intel CQM monitoring enabled\n");
|
||||
|
||||
/*
|
||||
* Register the hot cpu notifier once we are sure cqm
|
||||
* is enabled to avoid notifier leak.
|
||||
*/
|
||||
__perf_cpu_notifier(intel_cqm_cpu_notifier);
|
||||
out:
|
||||
cpu_notifier_register_done();
|
||||
if (ret) {
|
||||
kfree(str);
|
||||
cqm_cleanup();
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
|
|||
*cursor &= 0xfe;
|
||||
}
|
||||
/*
|
||||
* Similar treatment for VEX3 prefix.
|
||||
* TODO: add XOP/EVEX treatment when insn decoder supports them
|
||||
* Similar treatment for VEX3/EVEX prefix.
|
||||
* TODO: add XOP treatment when insn decoder supports them
|
||||
*/
|
||||
if (insn->vex_prefix.nbytes == 3) {
|
||||
if (insn->vex_prefix.nbytes >= 3) {
|
||||
/*
|
||||
* vex2: c5 rvvvvLpp (has no b bit)
|
||||
* vex3/xop: c4/8f rxbmmmmm wvvvvLpp
|
||||
* evex: 62 rxbR00mm wvvvv1pp zllBVaaa
|
||||
* (evex will need setting of both b and x since
|
||||
* in non-sib encoding evex.x is 4th bit of MODRM.rm)
|
||||
* Setting VEX3.b (setting because it has inverted meaning):
|
||||
* Setting VEX3.b (setting because it has inverted meaning).
|
||||
* Setting EVEX.x since (in non-SIB encoding) EVEX.x
|
||||
* is the 4th bit of MODRM.rm, and needs the same treatment.
|
||||
* For VEX3-encoded insns, VEX3.x value has no effect in
|
||||
* non-SIB encoding, the change is superfluous but harmless.
|
||||
*/
|
||||
cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
|
||||
*cursor |= 0x20;
|
||||
*cursor |= 0x60;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
|
|||
|
||||
reg = MODRM_REG(insn); /* Fetch modrm.reg */
|
||||
reg2 = 0xff; /* Fetch vex.vvvv */
|
||||
if (insn->vex_prefix.nbytes == 2)
|
||||
reg2 = insn->vex_prefix.bytes[1];
|
||||
else if (insn->vex_prefix.nbytes == 3)
|
||||
if (insn->vex_prefix.nbytes)
|
||||
reg2 = insn->vex_prefix.bytes[2];
|
||||
/*
|
||||
* TODO: add XOP, EXEV vvvv reading.
|
||||
* TODO: add XOP vvvv reading.
|
||||
*
|
||||
* vex.vvvv field is in bits 6-3, bits are inverted.
|
||||
* But in 32-bit mode, high-order bit may be ignored.
|
||||
|
|
|
@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
|
|||
|
||||
iter->fixed = false;
|
||||
iter->start_max = iter->start;
|
||||
iter->range = NULL;
|
||||
iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
|
||||
|
||||
__mtrr_lookup_var_next(iter);
|
||||
|
|
|
@ -8124,6 +8124,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
|
|||
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
|
||||
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
|
||||
exit_reason != EXIT_REASON_EPT_VIOLATION &&
|
||||
exit_reason != EXIT_REASON_PML_FULL &&
|
||||
exit_reason != EXIT_REASON_TASK_SWITCH)) {
|
||||
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
|
||||
|
@ -8736,6 +8737,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
|
|||
put_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensure that the current vmcs of the logical processor is the
|
||||
* vmcs01 of the vcpu before calling free_nested().
|
||||
*/
|
||||
static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
int r;
|
||||
|
||||
r = vcpu_load(vcpu);
|
||||
BUG_ON(r);
|
||||
vmx_load_vmcs01(vcpu);
|
||||
free_nested(vmx);
|
||||
vcpu_put(vcpu);
|
||||
}
|
||||
|
||||
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
|
@ -8744,8 +8761,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
|
|||
vmx_destroy_pml_buffer(vmx);
|
||||
free_vpid(vmx->vpid);
|
||||
leave_guest_mode(vcpu);
|
||||
vmx_load_vmcs01(vcpu);
|
||||
free_nested(vmx);
|
||||
vmx_free_vcpu_nested(vcpu);
|
||||
free_loaded_vmcs(vmx->loaded_vmcs);
|
||||
kfree(vmx->guest_msrs);
|
||||
kvm_vcpu_uninit(vcpu);
|
||||
|
|
|
@@ -37,6 +37,7 @@

/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191

/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */

@@ -224,14 +225,21 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)

/* Special treatment for IRQ0 */
if (dev->irq == 0) {
/*
 * Skip HS UART common registers device since it has
 * IRQ0 assigned and not used by the kernel.
 */
if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
return -EBUSY;
/*
 * TNG has IRQ0 assigned to eMMC controller. But there
 * are also other devices with bogus PCI configuration
 * that have IRQ0 assigned. This check ensures that
 * eMMC gets it.
 * eMMC gets it. The rest of devices still could be
 * enabled without interrupt line being allocated.
 */
if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
return -EBUSY;
return 0;
}
break;
default:
backported-features (new file, 14 lines)

@@ -0,0 +1,14 @@

LSK backported features

1. The kaslr and kaslr-pax_usercopy branches are based on LSK directly:
   v4.4/topic/mm-kaslr
   v4.4/topic/mm-kaslr-pax_usercopy

2. Coresight and OpenCSD are used for the Juno board 'perf' tool implementation:
   origin/v4.4/topic/coresight
   origin/v4.4/topic/perf-opencsd-4.4-github

3. OP-TEE is based on LSK mainline, but is not included in mainline.

Feature introduction:
https://wiki.linaro.org/lsk/features
block/bio.c (15 changed lines)

@@ -584,6 +584,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)

bio->bi_rw = bio_src->bi_rw;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;

bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

@@ -689,6 +691,8 @@ integrity_clone:

}
}

bio_clone_blkcg_association(bio, bio_src);

return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

@@ -2014,6 +2018,17 @@ void bio_disassociate_task(struct bio *bio)

}
}

/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
if (src->bi_css)
WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
@@ -517,7 +517,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(q->queue_lock);
queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(q->queue_lock);

if (q->mq_ops)
blk_mq_wake_waiters(q);
@@ -92,8 +92,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,

bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
unsigned bvecs = 0;

bio_for_each_segment(bv, bio, iter) {
/*
 * With arbitrary bio size, the incoming bio may be very
 * big. We have to split the bio into small bios so that
 * each holds at most BIO_MAX_PAGES bvecs because
 * bio_clone() can fail to allocate big bvecs.
 *
 * It should have been better to apply the limit per
 * request queue in which bio_clone() is involved,
 * instead of globally. The biggest blocker is the
 * bio_clone() in bio bounce.
 *
 * If bio is splitted by this reason, we should have
 * allowed to continue bios merging, but don't do
 * that now for making the change simple.
 *
 * TODO: deal with bio bounce's bio_clone() gracefully
 * and convert the global limit into per-queue limit.
 */
if (bvecs++ >= BIO_MAX_PAGES)
goto split;

/*
 * If the queue doesn't support SG gaps and adding this
 * offset would create a gap, disallow it.
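The new cap added above can be read as a simple counting rule: start another bio once BIO_MAX_PAGES segments have accumulated in the current one. The following standalone sketch illustrates only that counting rule; the names (MAX_SEGS, plan_splits) are illustrative and are not block-layer API.

#include <stdio.h>

#define MAX_SEGS 256	/* stand-in for BIO_MAX_PAGES in this kernel */

/* Count how many extra bios a request with nr_segs segments would be
 * split into under the "at most MAX_SEGS segments per bio" rule. */
static int plan_splits(int nr_segs)
{
	int splits = 0, segs_in_bio = 0;

	for (int i = 0; i < nr_segs; i++) {
		if (segs_in_bio++ >= MAX_SEGS) {	/* mirrors bvecs++ >= BIO_MAX_PAGES */
			splits++;
			segs_in_bio = 1;	/* this segment opens the next bio */
		}
	}
	return splits;
}

int main(void)
{
	printf("%d\n", plan_splits(1000));	/* 1000 segments -> 3 splits, 4 bios */
	return 0;
}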
@@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,

 * If a request wasn't started before the queue was
 * marked dying, kill it here or it'll go unnoticed.
 */
if (unlikely(blk_queue_dying(rq->q)))
blk_mq_complete_request(rq, -EIO);
if (unlikely(blk_queue_dying(rq->q))) {
rq->errors = -EIO;
blk_mq_end_request(rq, rq->errors);
}
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -612,7 +612,7 @@ void add_disk(struct gendisk *disk)

/* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info;
bdi_register_dev(bdi, disk_devt(disk));
bdi_register_owner(bdi, disk_to_dev(disk));

blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
@ -216,8 +216,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
|
|||
continue;
|
||||
|
||||
cpc_ptr = per_cpu(cpc_desc_ptr, i);
|
||||
if (!cpc_ptr)
|
||||
continue;
|
||||
if (!cpc_ptr) {
|
||||
retval = -EFAULT;
|
||||
goto err_ret;
|
||||
}
|
||||
|
||||
pdomain = &(cpc_ptr->domain_info);
|
||||
cpumask_set_cpu(i, pr->shared_cpu_map);
|
||||
|
@ -239,8 +241,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
|
|||
continue;
|
||||
|
||||
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
|
||||
if (!match_cpc_ptr)
|
||||
continue;
|
||||
if (!match_cpc_ptr) {
|
||||
retval = -EFAULT;
|
||||
goto err_ret;
|
||||
}
|
||||
|
||||
match_pdomain = &(match_cpc_ptr->domain_info);
|
||||
if (match_pdomain->domain != pdomain->domain)
|
||||
|
@ -270,8 +274,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
|
|||
continue;
|
||||
|
||||
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
|
||||
if (!match_cpc_ptr)
|
||||
continue;
|
||||
if (!match_cpc_ptr) {
|
||||
retval = -EFAULT;
|
||||
goto err_ret;
|
||||
}
|
||||
|
||||
match_pdomain = &(match_cpc_ptr->domain_info);
|
||||
if (match_pdomain->domain != pdomain->domain)
|
||||
|
@ -502,9 +508,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
|
|||
/* Store CPU Logical ID */
|
||||
cpc_ptr->cpu_id = pr->id;
|
||||
|
||||
/* Plug it into this CPUs CPC descriptor. */
|
||||
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
|
||||
|
||||
/* Parse PSD data for this CPU */
|
||||
ret = acpi_get_psd(cpc_ptr, handle);
|
||||
if (ret)
|
||||
|
@ -517,6 +520,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
|
|||
goto out_free;
|
||||
}
|
||||
|
||||
/* Plug PSD data into this CPUs CPC descriptor. */
|
||||
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
|
||||
|
||||
/* Everything looks okay */
|
||||
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
|
||||
|
||||
|
|
|
@ -101,6 +101,7 @@ enum ec_command {
|
|||
#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */
|
||||
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
|
||||
* when trying to clear the EC */
|
||||
#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
|
||||
|
||||
enum {
|
||||
EC_FLAGS_QUERY_PENDING, /* Query is pending */
|
||||
|
@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
|
|||
module_param(ec_delay, uint, 0644);
|
||||
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
|
||||
|
||||
static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
|
||||
module_param(ec_max_queries, uint, 0644);
|
||||
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
|
||||
|
||||
static bool ec_busy_polling __read_mostly;
|
||||
module_param(ec_busy_polling, bool, 0644);
|
||||
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
|
||||
|
@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
|
|||
|
||||
struct acpi_ec *boot_ec, *first_ec;
|
||||
EXPORT_SYMBOL(first_ec);
|
||||
static struct workqueue_struct *ec_query_wq;
|
||||
|
||||
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
|
||||
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
|
||||
|
@ -1097,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
|
|||
* work queue execution.
|
||||
*/
|
||||
ec_dbg_evt("Query(0x%02x) scheduled", value);
|
||||
if (!schedule_work(&q->work)) {
|
||||
if (!queue_work(ec_query_wq, &q->work)) {
|
||||
ec_dbg_evt("Query(0x%02x) overlapped", value);
|
||||
result = -EBUSY;
|
||||
}
|
||||
|
@ -1657,15 +1663,41 @@ static struct acpi_driver acpi_ec_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static inline int acpi_ec_query_init(void)
|
||||
{
|
||||
if (!ec_query_wq) {
|
||||
ec_query_wq = alloc_workqueue("kec_query", 0,
|
||||
ec_max_queries);
|
||||
if (!ec_query_wq)
|
||||
return -ENODEV;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void acpi_ec_query_exit(void)
|
||||
{
|
||||
if (ec_query_wq) {
|
||||
destroy_workqueue(ec_query_wq);
|
||||
ec_query_wq = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int __init acpi_ec_init(void)
|
||||
{
|
||||
int result = 0;
|
||||
int result;
|
||||
|
||||
/* register workqueue for _Qxx evaluations */
|
||||
result = acpi_ec_query_init();
|
||||
if (result)
|
||||
goto err_exit;
|
||||
/* Now register the driver for the EC */
|
||||
result = acpi_bus_register_driver(&acpi_ec_driver);
|
||||
if (result < 0)
|
||||
return -ENODEV;
|
||||
if (result)
|
||||
goto err_exit;
|
||||
|
||||
err_exit:
|
||||
if (result)
|
||||
acpi_ec_query_exit();
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -1675,5 +1707,6 @@ static void __exit acpi_ec_exit(void)
|
|||
{
|
||||
|
||||
acpi_bus_unregister_driver(&acpi_ec_driver);
|
||||
acpi_ec_query_exit();
|
||||
}
|
||||
#endif /* 0 */
|
||||
|
|
|
@ -1072,11 +1072,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
|
|||
{
|
||||
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
|
||||
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
|
||||
const u32 STATUS_MASK = 0x80000037;
|
||||
|
||||
if (mmio->num_lines)
|
||||
offset = to_interleave_offset(offset, mmio);
|
||||
|
||||
return readl(mmio->addr.base + offset);
|
||||
return readl(mmio->addr.base + offset) & STATUS_MASK;
|
||||
}
|
||||
|
||||
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
|
||||
|
|
|
@ -327,10 +327,18 @@ int __init acpi_numa_init(void)
|
|||
|
||||
/* SRAT: Static Resource Affinity Table */
|
||||
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
|
||||
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
|
||||
acpi_parse_x2apic_affinity, 0);
|
||||
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
|
||||
acpi_parse_processor_affinity, 0);
|
||||
struct acpi_subtable_proc srat_proc[2];
|
||||
|
||||
memset(srat_proc, 0, sizeof(srat_proc));
|
||||
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
|
||||
srat_proc[0].handler = acpi_parse_processor_affinity;
|
||||
srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
|
||||
srat_proc[1].handler = acpi_parse_x2apic_affinity;
|
||||
|
||||
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
|
||||
sizeof(struct acpi_table_srat),
|
||||
srat_proc, ARRAY_SIZE(srat_proc), 0);
|
||||
|
||||
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
|
||||
acpi_parse_memory_affinity,
|
||||
NR_NODE_MEMBLKS);
|
||||
|
|
|
@ -1958,7 +1958,7 @@ int __init acpi_scan_init(void)
|
|||
|
||||
static struct acpi_probe_entry *ape;
|
||||
static int acpi_probe_count;
|
||||
static DEFINE_SPINLOCK(acpi_probe_lock);
|
||||
static DEFINE_MUTEX(acpi_probe_mutex);
|
||||
|
||||
static int __init acpi_match_madt(struct acpi_subtable_header *header,
|
||||
const unsigned long end)
|
||||
|
@ -1977,7 +1977,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
|
|||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
||||
spin_lock(&acpi_probe_lock);
|
||||
mutex_lock(&acpi_probe_mutex);
|
||||
for (ape = ap_head; nr; ape++, nr--) {
|
||||
if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
|
||||
acpi_probe_count = 0;
|
||||
|
@ -1990,7 +1990,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
|
|||
count++;
|
||||
}
|
||||
}
|
||||
spin_unlock(&acpi_probe_lock);
|
||||
mutex_unlock(&acpi_probe_mutex);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
|
|
@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
|
|||
static int get_status(u32 index, acpi_event_status *status,
|
||||
acpi_handle *handle)
|
||||
{
|
||||
int result = 0;
|
||||
int result;
|
||||
|
||||
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
|
||||
goto end;
|
||||
return -EINVAL;
|
||||
|
||||
if (index < num_gpes) {
|
||||
result = acpi_get_gpe_device(index, handle);
|
||||
if (result) {
|
||||
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
|
||||
"Invalid GPE 0x%x", index));
|
||||
goto end;
|
||||
return result;
|
||||
}
|
||||
result = acpi_get_gpe_status(*handle, index, status);
|
||||
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
|
||||
result = acpi_get_event_status(index - num_gpes, status);
|
||||
|
||||
end:
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
|
||||
* supported by this firmware loading method. This check has been
|
||||
* put in place to ensure correct forward compatibility options
|
||||
* when newer hardware variants come along.
|
||||
/* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
|
||||
* and 0x0c (WsP) are supported by this firmware loading method.
|
||||
*
|
||||
* This check has been put in place to ensure correct forward
|
||||
* compatibility options when newer hardware variants come along.
|
||||
*/
|
||||
if (ver->hw_variant != 0x0b) {
|
||||
if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
|
||||
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
|
||||
hdev->name, ver->hw_variant);
|
||||
kfree_skb(skb);
|
||||
|
|
|
@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_device *pdev)
|
|||
|
||||
idev->pdev = pdev;
|
||||
|
||||
idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
|
||||
GPIOD_OUT_LOW);
|
||||
idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
|
||||
if (IS_ERR(idev->reset)) {
|
||||
dev_err(&pdev->dev, "Unable to retrieve gpio\n");
|
||||
return PTR_ERR(idev->reset);
|
||||
|
@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_device *pdev)
|
|||
|
||||
dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
|
||||
|
||||
host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
|
||||
GPIOD_IN);
|
||||
host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
|
||||
if (IS_ERR(host_wake)) {
|
||||
dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
|
||||
goto no_irq;
|
||||
|
|
|
@ -118,6 +118,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct exynos_rng *exynos_rng;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
|
||||
GFP_KERNEL);
|
||||
|
@ -145,7 +146,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
|
|||
pm_runtime_use_autosuspend(&pdev->dev);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
|
||||
ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
|
||||
if (ret) {
|
||||
pm_runtime_dont_use_autosuspend(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
|
|
@ -948,6 +948,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
|
|||
/* award one bit for the contents of the fast pool */
|
||||
credit_entropy_bits(r, credit + 1);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
void add_disk_randomness(struct gendisk *disk)
|
||||
|
@@ -1460,12 +1461,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
static int maxwarn = 10;
int ret;

if (unlikely(nonblocking_pool.initialized == 0))
printk_once(KERN_NOTICE "random: %s urandom read "
"with %d bits of entropy available\n",
current->comm, nonblocking_pool.entropy_total);
if (unlikely(nonblocking_pool.initialized == 0) &&
maxwarn > 0) {
maxwarn--;
printk(KERN_NOTICE "random: %s: uninitialized urandom read "
"(%zd bytes read, %d bits of entropy available)\n",
current->comm, nbytes, nonblocking_pool.entropy_total);
}

nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
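The change above swaps a once-only message for a bounded warning budget: each offending read consumes one of ten allowed messages, then the path goes silent. A minimal standalone sketch of that throttling pattern, under purely illustrative names (warn_limited, MAX_WARNINGS), not the kernel API:

#include <stddef.h>
#include <stdio.h>

#define MAX_WARNINGS 10

/* Emit a diagnostic for at most the first MAX_WARNINGS offending calls. */
static void warn_limited(const char *who, size_t nbytes)
{
	static int maxwarn = MAX_WARNINGS;

	if (maxwarn > 0) {
		maxwarn--;
		fprintf(stderr, "%s: uninitialized read (%zu bytes)\n",
			who, nbytes);
	}
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		warn_limited("demo", 32);	/* only the first 10 calls print */
	return 0;
}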
@ -1869,12 +1874,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
|
|||
{
|
||||
struct entropy_store *poolp = &input_pool;
|
||||
|
||||
/* Suspend writing if we're above the trickle threshold.
|
||||
* We'll be woken up again once below random_write_wakeup_thresh,
|
||||
* or when the calling thread is about to terminate.
|
||||
*/
|
||||
wait_event_interruptible(random_write_wait, kthread_should_stop() ||
|
||||
if (unlikely(nonblocking_pool.initialized == 0))
|
||||
poolp = &nonblocking_pool;
|
||||
else {
|
||||
/* Suspend writing if we're above the trickle
|
||||
* threshold. We'll be woken up again once below
|
||||
* random_write_wakeup_thresh, or when the calling
|
||||
* thread is about to terminate.
|
||||
*/
|
||||
wait_event_interruptible(random_write_wait,
|
||||
kthread_should_stop() ||
|
||||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
|
||||
}
|
||||
mix_pool_bytes(poolp, buffer, count);
|
||||
credit_entropy_bits(poolp, entropy);
|
||||
}
|
||||
|
|
|
@@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,

/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
data &= ~((1 << pclk->param.reg_divider_width) - 1);
data &= ~((1 << pclk->param.reg_divider_width) - 1)
<< pclk->param.reg_divider_shift;
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
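The point of the divider fix above is that, with a non-zero reg_divider_shift, the old mask cleared bits starting at bit 0 rather than the divider field itself. A small worked example with made-up field parameters (width 4, shift 8; not taken from the X-Gene register layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data = 0xABCD1234;
	unsigned width = 4, shift = 8;

	uint32_t old_mask = (1u << width) - 1;             /* 0x0000000f: clears the wrong bits */
	uint32_t new_mask = ((1u << width) - 1) << shift;  /* 0x00000f00: clears the divider field */

	printf("old clear: 0x%08x\n", data & ~old_mask);
	printf("new clear: 0x%08x\n", data & ~new_mask);
	return 0;
}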
@ -17,6 +17,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
|
||||
static DEFINE_MUTEX(userspace_mutex);
|
||||
|
@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
|
|||
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
unsigned int *setspeed = policy->governor_data;
|
||||
|
||||
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
|
||||
|
||||
|
@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
|
|||
if (!per_cpu(cpu_is_managed, policy->cpu))
|
||||
goto err;
|
||||
|
||||
*setspeed = freq;
|
||||
|
||||
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
|
||||
err:
|
||||
mutex_unlock(&userspace_mutex);
|
||||
|
@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
|
|||
return sprintf(buf, "%u\n", policy->cur);
|
||||
}
|
||||
|
||||
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
unsigned int *setspeed;
|
||||
|
||||
setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
|
||||
if (!setspeed)
|
||||
return -ENOMEM;
|
||||
|
||||
policy->governor_data = setspeed;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
{
|
||||
unsigned int *setspeed = policy->governor_data;
|
||||
unsigned int cpu = policy->cpu;
|
||||
int rc = 0;
|
||||
|
||||
if (event == CPUFREQ_GOV_POLICY_INIT)
|
||||
return cpufreq_userspace_policy_init(policy);
|
||||
|
||||
if (!setspeed)
|
||||
return -EINVAL;
|
||||
|
||||
switch (event) {
|
||||
case CPUFREQ_GOV_POLICY_EXIT:
|
||||
mutex_lock(&userspace_mutex);
|
||||
policy->governor_data = NULL;
|
||||
kfree(setspeed);
|
||||
mutex_unlock(&userspace_mutex);
|
||||
break;
|
||||
case CPUFREQ_GOV_START:
|
||||
BUG_ON(!policy->cur);
|
||||
pr_debug("started managing cpu %u\n", cpu);
|
||||
|
||||
mutex_lock(&userspace_mutex);
|
||||
per_cpu(cpu_is_managed, cpu) = 1;
|
||||
*setspeed = policy->cur;
|
||||
mutex_unlock(&userspace_mutex);
|
||||
break;
|
||||
case CPUFREQ_GOV_STOP:
|
||||
|
@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
|
|||
|
||||
mutex_lock(&userspace_mutex);
|
||||
per_cpu(cpu_is_managed, cpu) = 0;
|
||||
*setspeed = 0;
|
||||
mutex_unlock(&userspace_mutex);
|
||||
break;
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
mutex_lock(&userspace_mutex);
|
||||
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
|
||||
cpu, policy->min, policy->max,
|
||||
policy->cur);
|
||||
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
|
||||
cpu, policy->min, policy->max, policy->cur, *setspeed);
|
||||
|
||||
if (policy->max < policy->cur)
|
||||
if (policy->max < *setspeed)
|
||||
__cpufreq_driver_target(policy, policy->max,
|
||||
CPUFREQ_RELATION_H);
|
||||
else if (policy->min > policy->cur)
|
||||
else if (policy->min > *setspeed)
|
||||
__cpufreq_driver_target(policy, policy->min,
|
||||
CPUFREQ_RELATION_L);
|
||||
else
|
||||
__cpufreq_driver_target(policy, *setspeed,
|
||||
CPUFREQ_RELATION_L);
|
||||
mutex_unlock(&userspace_mutex);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -662,7 +662,7 @@ static int core_get_max_pstate(void)
|
|||
if (err)
|
||||
goto skip_tar;
|
||||
|
||||
tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
|
||||
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
|
||||
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
|
||||
if (err)
|
||||
goto skip_tar;
|
||||
|
|
|
@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
|||
OP_ALG_AAI_CTR_MOD128);
|
||||
const bool is_rfc3686 = alg->caam.rfc3686;
|
||||
|
||||
if (!ctx->authsize)
|
||||
return 0;
|
||||
|
||||
/* NULL encryption / decryption */
|
||||
if (!ctx->enckeylen)
|
||||
return aead_null_set_sh_desc(aead);
|
||||
|
@ -553,7 +556,10 @@ skip_enc:
|
|||
|
||||
/* Read and write assoclen bytes */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
if (alg->caam.geniv)
|
||||
append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
|
||||
else
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* Skip assoc data */
|
||||
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
|
||||
|
@ -562,6 +568,14 @@ skip_enc:
|
|||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
|
||||
KEY_VLF);
|
||||
|
||||
if (alg->caam.geniv) {
|
||||
append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
|
||||
LDST_SRCDST_BYTE_CONTEXT |
|
||||
(ctx1_iv_off << LDST_OFFSET_SHIFT));
|
||||
append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
|
||||
(ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
|
||||
}
|
||||
|
||||
/* Load Counter into CONTEXT1 reg */
|
||||
if (is_rfc3686)
|
||||
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
|
||||
|
@ -614,7 +628,7 @@ skip_enc:
|
|||
keys_fit_inline = true;
|
||||
|
||||
/* aead_givencrypt shared descriptor */
|
||||
desc = ctx->sh_desc_givenc;
|
||||
desc = ctx->sh_desc_enc;
|
||||
|
||||
/* Note: Context registers are saved. */
|
||||
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
|
||||
|
@ -645,13 +659,13 @@ copy_iv:
|
|||
append_operation(desc, ctx->class2_alg_type |
|
||||
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
|
||||
|
||||
/* ivsize + cryptlen = seqoutlen - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
|
||||
|
||||
/* Read and write assoclen bytes */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* ivsize + cryptlen = seqoutlen - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
|
||||
|
||||
/* Skip assoc data */
|
||||
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
|
||||
|
||||
|
@ -697,7 +711,7 @@ copy_iv:
|
|||
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
|
|||
|
||||
init_aead_job(req, edesc, all_contig, encrypt);
|
||||
|
||||
if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
|
||||
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
|
||||
append_load_as_imm(desc, req->iv, ivsize,
|
||||
LDST_CLASS_1_CCB |
|
||||
LDST_SRCDST_BYTE_CONTEXT |
|
||||
|
@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int aead_givdecrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
|
||||
if (req->cryptlen < ivsize)
|
||||
return -EINVAL;
|
||||
|
||||
req->cryptlen -= ivsize;
|
||||
req->assoclen += ivsize;
|
||||
|
||||
return aead_decrypt(req);
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate and map the ablkcipher extended descriptor for ablkcipher
|
||||
*/
|
||||
|
@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.maxauthsize = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = DES_BLOCK_SIZE,
|
||||
.maxauthsize = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = MD5_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = SHA1_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = SHA224_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = SHA256_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = SHA384_DIGEST_SIZE,
|
||||
},
|
||||
|
@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
|
|||
.setkey = aead_setkey,
|
||||
.setauthsize = aead_setauthsize,
|
||||
.encrypt = aead_encrypt,
|
||||
.decrypt = aead_givdecrypt,
|
||||
.decrypt = aead_decrypt,
|
||||
.ivsize = CTR_RFC3686_IV_SIZE,
|
||||
.maxauthsize = SHA512_DIGEST_SIZE,
|
||||
},
|
||||
|
|
|
@ -1873,6 +1873,7 @@ caam_hash_alloc(struct caam_hash_template *template,
|
|||
template->name);
|
||||
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
|
||||
template->driver_name);
|
||||
t_alg->ahash_alg.setkey = NULL;
|
||||
}
|
||||
alg->cra_module = THIS_MODULE;
|
||||
alg->cra_init = caam_hash_cra_init;
|
||||
|
|
|
@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
|
|||
(unsigned int)ccw,
|
||||
(unsigned int)be32_to_cpu(crb->ccw));
|
||||
|
||||
/*
|
||||
* NX842 coprocessor sets 3rd bit in CR register with XER[S0].
|
||||
* XER[S0] is the integer summary overflow bit which is nothing
|
||||
* to do NX. Since this bit can be set with other return values,
|
||||
* mask this bit.
|
||||
*/
|
||||
ret &= ~ICSWX_XERS0;
|
||||
|
||||
switch (ret) {
|
||||
case ICSWX_INITIATED:
|
||||
ret = wait_for_csb(wmem, csb);
|
||||
|
@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
|
|||
pr_err_ratelimited("ICSWX rejected\n");
|
||||
ret = -EPROTO;
|
||||
break;
|
||||
default:
|
||||
pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
|
||||
ret = -EPROTO;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
|
|
|
@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
|
|||
((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
|
||||
i < msc->triplets;
|
||||
i++) {
|
||||
if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
|
||||
if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
|
||||
dev_err(dev, "unknown function code/mode "
|
||||
"combo: %d/%d (ignored)\n", msc->fc,
|
||||
msc->mode);
|
||||
|
|
|
@ -1262,8 +1262,8 @@ static struct crypto_alg qat_algs[] = { {
|
|||
.setkey = qat_alg_ablkcipher_xts_setkey,
|
||||
.decrypt = qat_alg_ablkcipher_decrypt,
|
||||
.encrypt = qat_alg_ablkcipher_encrypt,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.min_keysize = 2 * AES_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * AES_MAX_KEY_SIZE,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
},
|
||||
},
|
||||
|
|
|
@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
|
|||
.cra_init = p8_aes_cbc_init,
|
||||
.cra_exit = p8_aes_cbc_exit,
|
||||
.cra_blkcipher = {
|
||||
.ivsize = 0,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = p8_aes_cbc_setkey,
|
||||
|
|
|
@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
|
|||
.cra_init = p8_aes_ctr_init,
|
||||
.cra_exit = p8_aes_ctr_exit,
|
||||
.cra_blkcipher = {
|
||||
.ivsize = 0,
|
||||
.ivsize = AES_BLOCK_SIZE,
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = p8_aes_ctr_setkey,
|
||||
|
|
|
@ -139,6 +139,26 @@ my $vmr = sub {
|
|||
" vor $vx,$vy,$vy";
|
||||
};
|
||||
|
||||
# Some ABIs specify vrsave, special-purpose register #256, as reserved
|
||||
# for system use.
|
||||
my $no_vrsave = ($flavour =~ /linux-ppc64le/);
|
||||
my $mtspr = sub {
|
||||
my ($f,$idx,$ra) = @_;
|
||||
if ($idx == 256 && $no_vrsave) {
|
||||
" or $ra,$ra,$ra";
|
||||
} else {
|
||||
" mtspr $idx,$ra";
|
||||
}
|
||||
};
|
||||
my $mfspr = sub {
|
||||
my ($f,$rd,$idx) = @_;
|
||||
if ($idx == 256 && $no_vrsave) {
|
||||
" li $rd,-1";
|
||||
} else {
|
||||
" mfspr $rd,$idx";
|
||||
}
|
||||
};
|
||||
|
||||
# PowerISA 2.06 stuff
|
||||
sub vsxmem_op {
|
||||
my ($f, $vrt, $ra, $rb, $op) = @_;
|
||||
|
|
|
@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
|
|||
{
|
||||
struct usb_dmac_chan *chan = dev;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
u32 mask = USB_DMACHCR_TE;
|
||||
u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
|
||||
u32 mask = 0;
|
||||
u32 chcr;
|
||||
bool xfer_end = false;
|
||||
|
||||
spin_lock(&chan->vc.lock);
|
||||
|
||||
chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
|
||||
if (chcr & check_bits)
|
||||
mask |= USB_DMACHCR_DE | check_bits;
|
||||
if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
|
||||
mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
|
||||
if (chcr & USB_DMACHCR_DE)
|
||||
xfer_end = true;
|
||||
ret |= IRQ_HANDLED;
|
||||
}
|
||||
if (chcr & USB_DMACHCR_NULL) {
|
||||
/* An interruption of TE will happen after we set FTE */
|
||||
mask |= USB_DMACHCR_NULL;
|
||||
chcr |= USB_DMACHCR_FTE;
|
||||
ret |= IRQ_HANDLED;
|
||||
}
|
||||
usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
|
||||
if (mask)
|
||||
usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
|
||||
|
||||
if (chcr & check_bits) {
|
||||
if (xfer_end)
|
||||
usb_dmac_isr_transfer_end(chan);
|
||||
ret |= IRQ_HANDLED;
|
||||
}
|
||||
|
||||
spin_unlock(&chan->vc.lock);
|
||||
|
||||
|
|
|
@ -966,7 +966,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
|
|||
mci->ue_mc += count;
|
||||
|
||||
if (!enable_per_layer_report) {
|
||||
mci->ce_noinfo_count += count;
|
||||
mci->ue_noinfo_count += count;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = {
|
|||
* possible dynamic channel DIMM Label attribute files
|
||||
*
|
||||
*/
|
||||
|
||||
DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
|
||||
channel_dimm_label_show, channel_dimm_label_store, 0);
|
||||
DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
|
||||
|
@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
|
|||
channel_dimm_label_show, channel_dimm_label_store, 4);
|
||||
DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
|
||||
channel_dimm_label_show, channel_dimm_label_store, 5);
|
||||
DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
|
||||
channel_dimm_label_show, channel_dimm_label_store, 6);
|
||||
DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
|
||||
channel_dimm_label_show, channel_dimm_label_store, 7);
|
||||
|
||||
/* Total possible dynamic DIMM Label attribute file table */
|
||||
static struct attribute *dynamic_csrow_dimm_attr[] = {
|
||||
|
@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
|
|||
&dev_attr_legacy_ch3_dimm_label.attr.attr,
|
||||
&dev_attr_legacy_ch4_dimm_label.attr.attr,
|
||||
&dev_attr_legacy_ch5_dimm_label.attr.attr,
|
||||
&dev_attr_legacy_ch6_dimm_label.attr.attr,
|
||||
&dev_attr_legacy_ch7_dimm_label.attr.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
|
|||
channel_ce_count_show, NULL, 4);
|
||||
DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
|
||||
channel_ce_count_show, NULL, 5);
|
||||
DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
|
||||
channel_ce_count_show, NULL, 6);
|
||||
DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
|
||||
channel_ce_count_show, NULL, 7);
|
||||
|
||||
/* Total possible dynamic ce_count attribute file table */
|
||||
static struct attribute *dynamic_csrow_ce_count_attr[] = {
|
||||
|
@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
|
|||
&dev_attr_legacy_ch3_ce_count.attr.attr,
|
||||
&dev_attr_legacy_ch4_ce_count.attr.attr,
|
||||
&dev_attr_legacy_ch5_ce_count.attr.attr,
|
||||
&dev_attr_legacy_ch6_ce_count.attr.attr,
|
||||
&dev_attr_legacy_ch7_ce_count.attr.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj,
|
|||
|
||||
if (idx >= csrow->nr_channels)
|
||||
return 0;
|
||||
|
||||
if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
|
||||
WARN_ONCE(1, "idx: %d\n", idx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Only expose populated DIMMs */
|
||||
if (!csrow->channels[idx]->dimm->nr_pages)
|
||||
return 0;
|
||||
|
||||
return attr->mode;
|
||||
}
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@ config GPIO_DEVRES
|
|||
config OF_GPIO
|
||||
def_bool y
|
||||
depends on OF
|
||||
depends on HAS_IOMEM
|
||||
|
||||
config GPIO_ACPI
|
||||
def_bool y
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
* Moorestown platform Langwell chip.
|
||||
* Medfield platform Penwell chip.
|
||||
* Clovertrail platform Cloverview chip.
|
||||
* Merrifield platform Tangier chip.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
@ -64,10 +63,6 @@ enum GPIO_REG {
|
|||
/* intel_mid gpio driver data */
|
||||
struct intel_mid_gpio_ddata {
|
||||
u16 ngpio; /* number of gpio pins */
|
||||
u32 gplr_offset; /* offset of first GPLR register from base */
|
||||
u32 flis_base; /* base address of FLIS registers */
|
||||
u32 flis_len; /* length of FLIS registers */
|
||||
u32 (*get_flis_offset)(int gpio);
|
||||
u32 chip_irq_type; /* chip interrupt type */
|
||||
};
|
||||
|
||||
|
@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
|
|||
.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
|
||||
};
|
||||
|
||||
static const struct intel_mid_gpio_ddata gpio_tangier = {
|
||||
.ngpio = 192,
|
||||
.gplr_offset = 4,
|
||||
.flis_base = 0xff0c0000,
|
||||
.flis_len = 0x8000,
|
||||
.get_flis_offset = NULL,
|
||||
.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
|
||||
};
|
||||
|
||||
static const struct pci_device_id intel_gpio_ids[] = {
|
||||
{
|
||||
/* Lincroft */
|
||||
|
@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
|
|||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
|
||||
.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
|
||||
},
|
||||
{
|
||||
/* Tangier */
|
||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
|
||||
.driver_data = (kernel_ulong_t)&gpio_tangier,
|
||||
},
|
||||
{ 0 }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
|
||||
|
|
|
@@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);

#define MAX_BANK 5
#define BANK_SZ 8

#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)

struct pca953x_chip {
unsigned gpio_start;
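The NBANK change above matters whenever the GPIO count is not a multiple of the bank size: plain integer division drops the partly populated last bank, while rounding up keeps it. A quick illustration with an assumed GPIO count of 18 (chosen only to show the rounding, with DIV_ROUND_UP open-coded here):

#include <stdio.h>

#define BANK_SZ 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned ngpio = 18;

	printf("old NBANK: %u\n", ngpio / BANK_SZ);              /* 2 banks: last 2 lines lost */
	printf("new NBANK: %u\n", DIV_ROUND_UP(ngpio, BANK_SZ)); /* 3 banks */
	return 0;
}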
@ -710,9 +710,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
|
|||
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
|
||||
int amdgpu_gart_init(struct amdgpu_device *adev);
|
||||
void amdgpu_gart_fini(struct amdgpu_device *adev);
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages);
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages, struct page **pagelist,
|
||||
dma_addr_t *dma_addr, uint32_t flags);
|
||||
|
||||
|
|
|
@ -331,6 +331,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
|
|||
(le16_to_cpu(path->usConnObjectId) &
|
||||
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
|
||||
|
||||
/* Skip TV/CV support */
|
||||
if ((le16_to_cpu(path->usDeviceTag) ==
|
||||
ATOM_DEVICE_TV1_SUPPORT) ||
|
||||
(le16_to_cpu(path->usDeviceTag) ==
|
||||
ATOM_DEVICE_CV_SUPPORT))
|
||||
continue;
|
||||
|
||||
if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
|
||||
DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
|
||||
con_obj_id, le16_to_cpu(path->usDeviceTag));
|
||||
continue;
|
||||
}
|
||||
|
||||
connector_type =
|
||||
object_connector_convert[con_obj_id];
|
||||
connector_object_id = con_obj_id;
|
||||
|
@ -566,28 +579,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
|
|||
le16_to_cpu(firmware_info->info.usReferenceClock);
|
||||
ppll->reference_div = 0;
|
||||
|
||||
if (crev < 2)
|
||||
ppll->pll_out_min =
|
||||
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
|
||||
else
|
||||
ppll->pll_out_min =
|
||||
le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
|
||||
ppll->pll_out_min =
|
||||
le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
|
||||
ppll->pll_out_max =
|
||||
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
|
||||
|
||||
if (crev >= 4) {
|
||||
ppll->lcd_pll_out_min =
|
||||
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
|
||||
if (ppll->lcd_pll_out_min == 0)
|
||||
ppll->lcd_pll_out_min = ppll->pll_out_min;
|
||||
ppll->lcd_pll_out_max =
|
||||
le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
|
||||
if (ppll->lcd_pll_out_max == 0)
|
||||
ppll->lcd_pll_out_max = ppll->pll_out_max;
|
||||
} else {
|
||||
ppll->lcd_pll_out_min =
|
||||
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
|
||||
if (ppll->lcd_pll_out_min == 0)
|
||||
ppll->lcd_pll_out_min = ppll->pll_out_min;
|
||||
ppll->lcd_pll_out_max =
|
||||
le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
|
||||
if (ppll->lcd_pll_out_max == 0)
|
||||
ppll->lcd_pll_out_max = ppll->pll_out_max;
|
||||
}
|
||||
|
||||
if (ppll->pll_out_min == 0)
|
||||
ppll->pll_out_min = 64800;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "amdgpu_acpi.h"
|
||||
|
||||
|
@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
|
|||
if (!info)
|
||||
return -EIO;
|
||||
kfree(info);
|
||||
|
||||
/* 200ms delay is required after off */
|
||||
if (state == 0)
|
||||
msleep(200);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
|
|||
DRM_MODE_SCALE_NONE);
|
||||
/* no HPD on analog connectors */
|
||||
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
|
||||
connector->interlace_allowed = true;
|
||||
connector->doublescan_allowed = true;
|
||||
break;
|
||||
|
@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
|
|||
}
|
||||
|
||||
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
|
||||
if (i2c_bus->valid)
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
|
||||
if (i2c_bus->valid) {
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
|
||||
DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
}
|
||||
} else
|
||||
connector->polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
|
|
|
@@ -1793,7 +1793,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)

}

drm_kms_helper_poll_enable(dev);

/*
 * Most of the connector probing functions try to acquire runtime pm
 * refs to ensure that the GPU is powered on when connector polling is
 * performed. Since we're calling this from a runtime PM callback,
 * trying to acquire rpm refs will cause us to deadlock.
 *
 * Since we're guaranteed to be holding the rpm lock, it's safe to
 * temporarily disable the rpm helpers so this doesn't deadlock us.
 */
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
drm_helper_hpd_irq_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif

if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
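The bracket around drm_helper_hpd_irq_event() above is a re-entrancy guard: while the resume path itself runs, nested attempts to take a runtime-PM reference are made harmless so they cannot wait on the resume already in progress. A minimal sketch of that guard pattern follows; every name here is illustrative and none of it is the DRM or runtime-PM API.

#include <stdio.h>

struct demo_dev {
	int disable_depth;	/* >0: power management temporarily bypassed */
};

static void demo_pm_get(struct demo_dev *d)
{
	if (d->disable_depth > 0)
		return;		/* would otherwise block on the resume in progress */
	printf("taking a runtime PM reference\n");
}

static void demo_poll_connectors(struct demo_dev *d)
{
	demo_pm_get(d);		/* safe: becomes a no-op while resume is running */
}

static void demo_resume(struct demo_dev *d)
{
	d->disable_depth++;	/* open the guard */
	demo_poll_connectors(d);
	d->disable_depth--;	/* close the guard */
}

int main(void)
{
	struct demo_dev d = { 0 };

	demo_resume(&d);		/* polling inside resume takes no reference */
	demo_poll_connectors(&d);	/* outside resume: takes a real reference */
	return 0;
}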
@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
|
|||
* Unbinds the requested pages from the gart page table and
|
||||
* replaces them with the dummy page (all asics).
|
||||
*/
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages)
|
||||
{
|
||||
unsigned t;
|
||||
|
@ -269,7 +269,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
|||
* (all asics).
|
||||
* Returns 0 for success, -EINVAL for failure.
|
||||
*/
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages, struct page **pagelist, dma_addr_t *dma_addr,
|
||||
uint32_t flags)
|
||||
{
|
||||
|
|
|
@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
|
|||
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned i;
|
||||
int r;
|
||||
int r, ret = 0;
|
||||
|
||||
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
|
||||
struct amdgpu_ring *ring = adev->rings[i];
|
||||
|
@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
|
|||
} else {
|
||||
/* still not good, but we can live with it */
|
||||
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
|
||||
ret = r;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -233,8 +233,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
|
|||
|
||||
adev = amdgpu_get_adev(bo->bdev);
|
||||
ring = adev->mman.buffer_funcs_ring;
|
||||
old_start = old_mem->start << PAGE_SHIFT;
|
||||
new_start = new_mem->start << PAGE_SHIFT;
|
||||
old_start = (u64)old_mem->start << PAGE_SHIFT;
|
||||
new_start = (u64)new_mem->start << PAGE_SHIFT;
|
||||
|
||||
switch (old_mem->mem_type) {
|
||||
case TTM_PL_VRAM:
|
||||
|
|
|
@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
|
|||
|
||||
/* convert bits per color to bits per pixel */
|
||||
/* get bpc from the EDID */
|
||||
static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
|
||||
static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
|
||||
{
|
||||
if (bpc == 0)
|
||||
return 24;
|
||||
|
@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
|
|||
return bpc * 3;
|
||||
}
|
||||
|
||||
/* get the max pix clock supported by the link rate and lane num */
|
||||
static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
|
||||
int lane_num,
|
||||
int bpp)
|
||||
{
|
||||
return (link_rate * lane_num * 8) / bpp;
|
||||
}
|
||||
|
||||
/***** amdgpu specific DP functions *****/
|
||||
|
||||
/* First get the min lane# when low rate is used according to pixel clock
|
||||
* (prefer low rate), second check max lane# supported by DP panel,
|
||||
* if the max lane# < low rate lane# then use max lane# instead.
|
||||
*/
|
||||
static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
|
||||
static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
|
||||
const u8 dpcd[DP_DPCD_SIZE],
|
||||
int pix_clock)
|
||||
unsigned pix_clock,
|
||||
unsigned *dp_lanes, unsigned *dp_rate)
|
||||
{
|
||||
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
|
||||
int max_link_rate = drm_dp_max_link_rate(dpcd);
|
||||
int max_lane_num = drm_dp_max_lane_count(dpcd);
|
||||
int lane_num;
|
||||
int max_dp_pix_clock;
|
||||
unsigned bpp =
|
||||
amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
|
||||
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
|
||||
unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
|
||||
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
|
||||
unsigned lane_num, i, max_pix_clock;
|
||||
|
||||
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
|
||||
max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
|
||||
if (pix_clock <= max_dp_pix_clock)
|
||||
break;
|
||||
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
|
||||
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
|
||||
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
|
||||
if (max_pix_clock >= pix_clock) {
|
||||
*dp_lanes = lane_num;
|
||||
*dp_rate = link_rates[i];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lane_num;
|
||||
}
|
||||
|
||||
static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
|
||||
const u8 dpcd[DP_DPCD_SIZE],
|
||||
int pix_clock)
|
||||
{
|
||||
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
|
||||
int lane_num, max_pix_clock;
|
||||
|
||||
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
|
||||
ENCODER_OBJECT_ID_NUTMEG)
|
||||
return 270000;
|
||||
|
||||
lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 162000;
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 270000;
|
||||
if (amdgpu_connector_is_dp12_capable(connector)) {
|
||||
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
|
||||
if (pix_clock <= max_pix_clock)
|
||||
return 540000;
|
||||
}
|
||||
|
||||
return drm_dp_max_link_rate(dpcd);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
|
||||
|
@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
|
|||
{
|
||||
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
||||
struct amdgpu_connector_atom_dig *dig_connector;
|
||||
int ret;
|
||||
|
||||
if (!amdgpu_connector->con_priv)
|
||||
return;
|
||||
|
@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
|
|||
|
||||
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
|
||||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
|
||||
dig_connector->dp_clock =
|
||||
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
|
||||
dig_connector->dp_lane_count =
|
||||
amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
|
||||
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
|
||||
mode->clock,
|
||||
&dig_connector->dp_lane_count,
|
||||
&dig_connector->dp_clock);
|
||||
if (ret) {
|
||||
dig_connector->dp_clock = 0;
|
||||
dig_connector->dp_lane_count = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
|
|||
{
|
||||
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
||||
struct amdgpu_connector_atom_dig *dig_connector;
|
||||
int dp_clock;
|
||||
unsigned dp_lanes, dp_clock;
|
||||
int ret;
|
||||
|
||||
if (!amdgpu_connector->con_priv)
|
||||
return MODE_CLOCK_HIGH;
|
||||
dig_connector = amdgpu_connector->con_priv;
|
||||
|
||||
dp_clock =
|
||||
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
|
||||
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
|
||||
mode->clock, &dp_lanes, &dp_clock);
|
||||
if (ret)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
if ((dp_clock == 540000) &&
|
||||
(!amdgpu_connector_is_dp12_capable(connector)))
|
||||
|
|
|
@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
|
|||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
|
||||
if (dig->backlight_level == 0)
|
||||
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
|
||||
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
|
||||
|
|
|
@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
|
|||
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
|
||||
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
|
||||
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
|
||||
static int cik_sdma_soft_reset(void *handle);
|
||||
|
||||
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
|
||||
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
|
||||
|
@ -1030,6 +1031,8 @@ static int cik_sdma_resume(void *handle)
|
|||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
cik_sdma_soft_reset(handle);
|
||||
|
||||
return cik_sdma_hw_init(adev);
|
||||
}
|
||||
|
||||
|
|
|
@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
|
|||
}
|
||||
} else { /*pi->caps_vce_pg*/
|
||||
cz_update_vce_dpm(adev);
|
||||
cz_enable_vce_dpm(adev, true);
|
||||
cz_enable_vce_dpm(adev, !gate);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const struct amd_ip_funcs cz_dpm_ip_funcs = {
|
||||
|
|
|
@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
|
|||
break;
|
||||
case CHIP_KAVERI:
|
||||
case CHIP_KABINI:
|
||||
case CHIP_MULLINS:
|
||||
return 0;
|
||||
default: BUG();
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff.