From 6c2d6ce4aaf766c1fd0f57a5ebca8c03c82b80fa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 19 Nov 2015 15:49:23 +0100 Subject: [PATCH 01/78] ARM: 8458/1: bL_switcher: add GIC dependency It is not possible to build the bL_switcher code if the GIC driver is disabled, because it relies on calling into some gic specific interfaces, and that would result in this build error: arch/arm/common/built-in.o: In function `bL_switch_to': :(.text+0x1230): undefined reference to `gic_get_sgir_physaddr' :(.text+0x1244): undefined reference to `gic_send_sgi' :(.text+0x1268): undefined reference to `gic_migrate_target' arch/arm/common/built-in.o: In function `bL_switcher_enable.part.4': :(.text.unlikely+0x2f8): undefined reference to `gic_get_cpu_id' This adds a Kconfig dependency to ensure we only build the big-little switcher if the GIC driver is present as well. Almost all ARMv7 platforms come with a GIC anyway, but it is possible to build a kernel that disables all platforms. Signed-off-by: Arnd Bergmann Acked-by: Nicolas Pitre Signed-off-by: Russell King (cherry picked from commit 6c044fecdf78be3fda159a5036bb33700cdd5e59) Signed-off-by: Alex Shi --- arch/arm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 34e1569a11ee..ceee97090051 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1422,7 +1422,7 @@ config BIG_LITTLE config BL_SWITCHER bool "big.LITTLE switcher support" - depends on BIG_LITTLE && MCPM && HOTPLUG_CPU + depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC select ARM_CPU_SUSPEND select CPU_PM help From c6cdbc5e461b146a5723ad54d079c198ce6beb45 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 1 Feb 2016 18:01:29 +0100 Subject: [PATCH 02/78] ARM: 8510/1: rework ARM_CPU_SUSPEND dependencies The code enabled by the ARM_CPU_SUSPEND config option is used by kernel subsystems for purposes that go beyond system suspend so its config entry should be augmented to take more default options into account and avoid forcing its selection to prevent dependencies override. To achieve this goal, this patch reworks the ARM_CPU_SUSPEND config entry and updates its default config value (by adding the BL_SWITCHER option to it) and its dependencies (ARCH_SUSPEND_POSSIBLE), so that the symbol is still selected by default by the subsystems requiring it and at the same time enforcing the dependencies correctly. Signed-off-by: Lorenzo Pieralisi Cc: Nicolas Pitre Signed-off-by: Russell King (cherry picked from commit 1b9bdf5c1661873a10e193b8cbb803a87fe5c4a1) Signed-off-by: Alex Shi --- arch/arm/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ceee97090051..20ae5d27da73 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1423,7 +1423,6 @@ config BIG_LITTLE config BL_SWITCHER bool "big.LITTLE switcher support" depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC - select ARM_CPU_SUSPEND select CPU_PM help The big.LITTLE "switcher" provides the core functionality to @@ -2140,7 +2139,8 @@ config ARCH_SUSPEND_POSSIBLE def_bool y config ARM_CPU_SUSPEND - def_bool PM_SLEEP + def_bool PM_SLEEP || BL_SWITCHER + depends on ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE bool From ef2209ec3ee8a3c9238d329d1e6afa9b22766962 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:37:32 +0100 Subject: [PATCH 03/78] ARM: 8478/2: arm/arm64: add arm-smccc Adds helpers to do SMC and HVC based on ARM SMC Calling Convention. 
CONFIG_HAVE_ARM_SMCCC is enabled for architectures that may support the SMC or HVC instruction. It's the responsibility of the caller to know if the SMC instruction is supported by the platform. This patch doesn't provide an implementation of the declared functions. Later patches will bring in implementations and set CONFIG_HAVE_ARM_SMCCC for ARM and ARM64 respectively. Reviewed-by: Lorenzo Pieralisi Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit 98dd64f34f47ce19b388d9015f767f48393a81eb) Signed-off-by: Alex Shi --- drivers/firmware/Kconfig | 3 ++ include/linux/arm-smccc.h | 104 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 include/linux/arm-smccc.h diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index cf478fe6b335..49a3a1185bb6 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -173,6 +173,9 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config HAVE_ARM_SMCCC + bool + source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 000000000000..b5abfda80465 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include +#include + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. + */ +asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +/** + * arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. + */ +asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +#endif /*__LINUX_ARM_SMCCC_H*/ From fb0b4725d32fd59f18236123570d23121df87d8c Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:42:55 +0100 Subject: [PATCH 04/78] ARM: 8479/2: add implementation for arm-smccc Adds implementation for arm-smccc and enables CONFIG_HAVE_SMCCC for architectures that may support arm-smccc. It's the responsibility of the caller to know if the SMC instruction is supported by the platform. 
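For illustration, a minimal sketch of how a caller might combine the macros and helpers declared above; the SiP function number 0x1 is a made-up example, not a real firmware service:

#include <linux/arm-smccc.h>

/* Hypothetical SiP service: fast call, 32-bit convention, function 0x1. */
#define EXAMPLE_SIP_FUNC						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,	\
			   ARM_SMCCC_OWNER_SIP, 0x1)

static unsigned long example_sip_call(unsigned long arg)
{
	struct arm_smccc_res res;

	/* a0 carries the function ID; unused arguments are passed as 0. */
	arm_smccc_smc(EXAMPLE_SIP_FUNC, arg, 0, 0, 0, 0, 0, 0, &res);
	return res.a0;	/* first result register */
}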
Reviewed-by: Lars Persson Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit b329f95d70f3f955093e9a2b18ac1ed3587a8f73) Signed-off-by: Alex Shi --- arch/arm/Kconfig | 1 + arch/arm/kernel/Makefile | 2 ++ arch/arm/kernel/armksyms.c | 6 ++++ arch/arm/kernel/smccc-call.S | 62 ++++++++++++++++++++++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100644 arch/arm/kernel/smccc-call.S diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 20ae5d27da73..61bec5eca9c3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -37,6 +37,7 @@ config ARM select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK + select HAVE_ARM_SMCCC if CPU_V7 select HAVE_BPF_JIT select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index af9e59bf3831..d2d00424f044 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -92,4 +92,6 @@ obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif +obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index f89811fb9a55..7e45f69a0ddc 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc); EXPORT_SYMBOL(__pv_phys_pfn_offset); EXPORT_SYMBOL(__pv_offset); #endif + +#ifdef CONFIG_HAVE_ARM_SMCCC +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); +#endif diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S new file mode 100644 index 000000000000..2e48b674aab1 --- /dev/null +++ b/arch/arm/kernel/smccc-call.S @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#include +#include +#include + + /* + * Wrap c macros in asm macros to delay expansion until after the + * SMCCC asm macro is expanded. 
+ */ + .macro SMCCC_SMC + __SMC(0) + .endm + + .macro SMCCC_HVC + __HVC(0) + .endm + + .macro SMCCC instr +UNWIND( .fnstart) + mov r12, sp + push {r4-r7} +UNWIND( .save {r4-r7}) + ldm r12, {r4-r7} + \instr + pop {r4-r7} + ldr r12, [sp, #(4 * 4)] + stm r12, {r0-r3} + bx lr +UNWIND( .fnend) + .endm + +/* + * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC SMCCC_SMC +ENDPROC(arm_smccc_smc) + +/* + * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC SMCCC_HVC +ENDPROC(arm_smccc_hvc) From 71bc50d5e49b12620121c332952522b7aa4e1486 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:44:32 +0100 Subject: [PATCH 05/78] ARM: 8480/2: arm64: add implementation for arm-smccc Adds implementation for arm-smccc and enables CONFIG_HAVE_SMCCC. Acked-by: Will Deacon Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit 14457459f9ca2ff8521686168ea179edc3a56a44) Signed-off-by: Alex Shi Conflicts: arch/arm64/kernel/arm64ksyms.c --- arch/arm64/Kconfig | 1 + arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/arm64ksyms.c | 5 ++++ arch/arm64/kernel/asm-offsets.c | 3 +++ arch/arm64/kernel/smccc-call.S | 43 +++++++++++++++++++++++++++++++++ 5 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kernel/smccc-call.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 491163763e8e..7107dc965f32 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -95,6 +95,7 @@ config ARM64 select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select HAVE_CONTEXT_TRACKING + select HAVE_ARM_SMCCC help ARM 64-bit (AArch64) Linux support. 
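The arm64 implementation that follows relies on the asm-offsets mechanism so the assembly can store results into struct arm_smccc_res without hard-coded offsets. A rough sketch of that pattern, assuming the usual kbuild DEFINE() helper (the real hunk is in arch/arm64/kernel/asm-offsets.c below):

#include <linux/kbuild.h>	/* DEFINE() emits named constants */
#include <linux/stddef.h>
#include <linux/arm-smccc.h>

int main(void)
{
	/* Kbuild compiles this file to assembly and scrapes the values into
	 * generated/asm-offsets.h, so .S code can address C struct fields
	 * symbolically, e.g. stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]. */
	DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
	DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
	return 0;
}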
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cfa290b7dfaf..0a987eb0c927 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ - smp.o smp_spin_table.o topology.o + smp.o smp_spin_table.o topology.o smccc-call.o extra-$(CONFIG_EFI) := efi-entry.o diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index fbec403a2466..fce2b1166a4c 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -70,3 +71,7 @@ EXPORT_SYMBOL(test_and_change_bit); EXPORT_SYMBOL(_mcount); NOKPROBE_SYMBOL(_mcount); #endif + + /* arm-smccc */ +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index dd2925192405..2a948bfcaf87 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -28,6 +28,7 @@ #include #include #include +#include int main(void) { @@ -173,5 +174,7 @@ int main(void) DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif + DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); + DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); return 0; } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S new file mode 100644 index 000000000000..ae0496fa4235 --- /dev/null +++ b/arch/arm64/kernel/smccc-call.S @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License Version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include + + .macro SMCCC instr + .cfi_startproc + \instr #0 + ldr x4, [sp] + stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] + stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] + ret + .cfi_endproc + .endm + +/* + * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_smc) + SMCCC smc +ENDPROC(arm_smccc_smc) + +/* + * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, + * unsigned long a3, unsigned long a4, unsigned long a5, + * unsigned long a6, unsigned long a7, struct arm_smccc_res *res) + */ +ENTRY(arm_smccc_hvc) + SMCCC hvc +ENDPROC(arm_smccc_hvc) From 58c98d459e665603a141d94396e9db521acef266 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Mon, 4 Jan 2016 15:46:47 +0100 Subject: [PATCH 06/78] ARM: 8481/2: drivers: psci: replace psci firmware calls Switch to use a generic interface for issuing SMC/HVC based on ARM SMC Calling Convention. Removes now the now unused psci-call.S. 
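For orientation before the diff: after this change both PSCI conduits funnel through the generic SMCCC helpers, and probe code selects one via a function pointer. A condensed sketch of that shape, where use_hvc stands in for the driver's parsing of the DT "method" property:

typedef unsigned long (psci_fn)(unsigned long, unsigned long,
				unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;

/* __invoke_psci_fn_{hvc,smc} become the C wrappers added below, each a
 * thin call to arm_smccc_{hvc,smc} that returns res.a0. */
static void example_pick_conduit(bool use_hvc)
{
	invoke_psci_fn = use_hvc ? __invoke_psci_fn_hvc
				 : __invoke_psci_fn_smc;
}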
Acked-by: Will Deacon Reviewed-by: Mark Rutland Tested-by: Mark Rutland Acked-by: Lorenzo Pieralisi Tested-by: Lorenzo Pieralisi Signed-off-by: Jens Wiklander Signed-off-by: Russell King (cherry picked from commit e679660dbb8347f275fe5d83a5dd59c1fb6c8e63) Signed-off-by: Alex Shi --- arch/arm/Kconfig | 2 +- arch/arm/kernel/Makefile | 1 - arch/arm/kernel/psci-call.S | 31 ------------------------------- arch/arm64/kernel/Makefile | 2 +- arch/arm64/kernel/psci-call.S | 28 ---------------------------- drivers/firmware/psci.c | 23 +++++++++++++++++++++-- 6 files changed, 23 insertions(+), 64 deletions(-) delete mode 100644 arch/arm/kernel/psci-call.S delete mode 100644 arch/arm64/kernel/psci-call.S diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 61bec5eca9c3..2d5eebb38798 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1481,7 +1481,7 @@ config HOTPLUG_CPU config ARM_PSCI bool "Support for the ARM Power State Coordination Interface (PSCI)" - depends on CPU_V7 + depends on HAVE_ARM_SMCCC select ARM_PSCI_FW help Say Y here if you want Linux to communicate with system firmware diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index d2d00424f044..80856def2465 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -88,7 +88,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o ifeq ($(CONFIG_ARM_PSCI),y) -obj-y += psci-call.o obj-$(CONFIG_SMP) += psci_smp.o endif diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S deleted file mode 100644 index a78e9e1e206d..000000000000 --- a/arch/arm/kernel/psci-call.S +++ /dev/null @@ -1,31 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2015 ARM Limited - * - * Author: Mark Rutland - */ - -#include - -#include -#include - -/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - __HVC(0) - bx lr -ENDPROC(__invoke_psci_fn_hvc) - -/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */ -ENTRY(__invoke_psci_fn_smc) - __SMC(0) - bx lr -ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 0a987eb0c927..85f6924d6394 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -14,7 +14,7 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + hyp-stub.o psci.o cpu_ops.o insn.o \ return_address.o cpuinfo.o cpu_errata.o \ cpufeature.o alternative.o cacheinfo.o \ smp.o smp_spin_table.o topology.o smccc-call.o diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S deleted file mode 100644 index cf83e61cd3b5..000000000000 --- a/arch/arm64/kernel/psci-call.S +++ /dev/null @@ -1,28 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2015 ARM Limited - * - * Author: Will Deacon - */ - -#include - -/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_hvc) - hvc #0 - ret -ENDPROC(__invoke_psci_fn_hvc) - -/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ -ENTRY(__invoke_psci_fn_smc) - smc #0 - ret -ENDPROC(__invoke_psci_fn_smc) diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d24f35d74b27..f25cd79c8a79 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "psci: " fmt +#include #include #include #include @@ -58,8 +59,6 @@ struct psci_operations psci_ops; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); -asmlinkage psci_fn __invoke_psci_fn_hvc; -asmlinkage psci_fn __invoke_psci_fn_smc; static psci_fn *invoke_psci_fn; enum psci_function { @@ -107,6 +106,26 @@ bool psci_power_state_is_valid(u32 state) return !(state & ~valid_mask); } +static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + static int psci_to_linux_errno(int errno) { switch (errno) { From 80d2b4c95b2c4e5b8c143703150ab67b8a198e63 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 1 Feb 2016 18:01:30 +0100 Subject: [PATCH 07/78] ARM: 8511/1: ARM64: kernel: PSCI: move PSCI idle management code to drivers/firmware ARM64 PSCI kernel interfaces that initialize idle states and implement the suspend API to enter them are generic and can be shared with the ARM architecture. To achieve that goal, this patch moves ARM64 PSCI idle management code to drivers/firmware, so that the interface to initialize and enter idle states can actually be shared by ARM and ARM64 arches back-ends. The ARM generic CPUidle implementation also requires the definition of a cpuidle_ops section entry for the kernel to initialize the CPUidle operations at boot based on the enable-method (ie ARM64 has the statically initialized cpu_ops counterparts for that purpose); therefore this patch also adds the required section entry on CONFIG_ARM for PSCI so that the kernel can initialize the PSCI CPUidle back-end when PSCI is the probed enable-method. On ARM64 this patch provides no functional change. 
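A hedged sketch of the cpuidle_ops section-entry mechanism the message describes; all names here are illustrative, modelled on the psci registration in the drivers/firmware/psci.c hunk below. The kernel matches the DT enable-method string against these entries, then calls ->init() per CPU at boot and ->suspend() on idle entry:

#include <linux/init.h>
#include <linux/of.h>
#include <asm/cpuidle.h>

static int __init example_init_idle(struct device_node *cpu_node, int cpu)
{
	return 0;	/* parse this CPU's idle states from the DT */
}

static int example_suspend(unsigned long index)
{
	return 0;	/* enter the idle state cpuidle selected */
}

static struct cpuidle_ops example_cpuidle_ops __initdata = {
	.suspend = example_suspend,
	.init = example_init_idle,
};
CPUIDLE_METHOD_OF_DECLARE(example, "vendor,example-method", &example_cpuidle_ops);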
Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Acked-by: Catalin Marinas [arch/arm64] Acked-by: Mark Rutland Tested-by: Jisheng Zhang Cc: Will Deacon Cc: Sudeep Holla Cc: Daniel Lezcano Cc: Catalin Marinas Cc: Mark Rutland Cc: Jisheng Zhang Signed-off-by: Russell King (cherry picked from commit 8b6f2499ac45d5a0ab2e4b6f9613ab3f60416be1) Signed-off-by: Alex Shi --- arch/arm/Kconfig | 2 +- arch/arm64/kernel/psci.c | 99 +------------------------------- drivers/firmware/psci.c | 120 +++++++++++++++++++++++++++++++++++++++ include/linux/psci.h | 3 + 4 files changed, 126 insertions(+), 98 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2d5eebb38798..61d7eb3c1ff5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2140,7 +2140,7 @@ config ARCH_SUSPEND_POSSIBLE def_bool y config ARM_CPU_SUSPEND - def_bool PM_SLEEP || BL_SWITCHER + def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW depends on ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index f67f35b6edb1..42816bebb1e0 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -20,7 +20,6 @@ #include #include #include -#include #include @@ -28,73 +27,6 @@ #include #include #include -#include - -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); - -static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu) -{ - int i, ret, count = 0; - u32 *psci_states; - struct device_node *state_node, *cpu_node; - - cpu_node = of_get_cpu_node(cpu, NULL); - if (!cpu_node) - return -ENODEV; - - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - - /* Count idle states */ - while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - u32 state; - - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); - - ret = of_property_read_u32(state_node, - "arm,psci-suspend-param", - &state); - if (ret) { - pr_warn(" * %s missing arm,psci-suspend-param property\n", - state_node->full_name); - of_node_put(state_node); - goto free_mem; - } - - of_node_put(state_node); - pr_debug("psci-power-state %#x index %d\n", state, i); - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - ret = -EINVAL; - goto free_mem; - } - psci_states[i] = state; - } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; - -free_mem: - kfree(psci_states); - return ret; -} static int __init cpu_psci_cpu_init(unsigned int cpu) { @@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu) } #endif -static int psci_suspend_finisher(unsigned long index) -{ - u32 *state = __this_cpu_read(psci_power_state); - - return psci_ops.cpu_suspend(state[index - 1], - virt_to_phys(cpu_resume)); -} - -static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) -{ - int ret; - u32 *state = __this_cpu_read(psci_power_state); - /* - * idle state index 0 corresponds to wfi, should never be called - * from the cpu_suspend operations - */ - if (WARN_ON_ONCE(!index)) - return -EINVAL; - - if (!psci_power_state_loses_context(state[index - 1])) - ret = psci_ops.cpu_suspend(state[index - 1], 0); - else - 
ret = cpu_suspend(index, psci_suspend_finisher); - - return ret; -} - const struct cpu_operations cpu_psci_ops = { .name = "psci", #ifdef CONFIG_CPU_IDLE - .cpu_init_idle = cpu_psci_cpu_init_idle, - .cpu_suspend = cpu_psci_cpu_suspend, + .cpu_init_idle = psci_cpu_init_idle, + .cpu_suspend = psci_cpu_suspend_enter, #endif .cpu_init = cpu_psci_cpu_init, .cpu_prepare = cpu_psci_cpu_prepare, diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index f25cd79c8a79..11bfee8b79a9 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) "psci: " fmt #include +#include #include #include #include @@ -21,10 +22,12 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id) psci_func_id, 0, 0); } +#ifdef CONFIG_CPU_IDLE +static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); + +static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +{ + int i, ret, count = 0; + u32 *psci_states; + struct device_node *state_node; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + + ret = of_property_read_u32(state_node, + "arm,psci-suspend-param", + &state); + if (ret) { + pr_warn(" * %s missing arm,psci-suspend-param property\n", + state_node->full_name); + of_node_put(state_node); + goto free_mem; + } + + of_node_put(state_node); + pr_debug("psci-power-state %#x index %d\n", state, i); + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + ret = -EINVAL; + goto free_mem; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + +int psci_cpu_init_idle(unsigned int cpu) +{ + struct device_node *cpu_node; + int ret; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (!cpu_node) + return -ENODEV; + + ret = psci_dt_cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + + return ret; +} + +static int psci_suspend_finisher(unsigned long index) +{ + u32 *state = __this_cpu_read(psci_power_state); + + return psci_ops.cpu_suspend(state[index - 1], + virt_to_phys(cpu_resume)); +} + +int psci_cpu_suspend_enter(unsigned long index) +{ + int ret; + u32 *state = __this_cpu_read(psci_power_state); + /* + * idle state index 0 corresponds to wfi, should never be called + * from the cpu_suspend operations + */ + if (WARN_ON_ONCE(!index)) + return -EINVAL; + + if (!psci_power_state_loses_context(state[index - 1])) + ret = psci_ops.cpu_suspend(state[index - 1], 0); + else + ret = cpu_suspend(index, psci_suspend_finisher); + + return ret; +} + +/* ARM specific CPU idle operations */ +#ifdef CONFIG_ARM +static struct cpuidle_ops psci_cpuidle_ops __initdata = { + .suspend = psci_cpu_suspend_enter, + .init = psci_dt_cpu_init_idle, +}; + +CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); +#endif +#endif + static int 
psci_system_suspend(unsigned long unused) { return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), diff --git a/include/linux/psci.h b/include/linux/psci.h index 12c4865457ad..393efe2edf9a 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu); bool psci_power_state_loses_context(u32 state); bool psci_power_state_is_valid(u32 state); +int psci_cpu_init_idle(unsigned int cpu); +int psci_cpu_suspend_enter(unsigned long index); + struct psci_operations { int (*cpu_suspend)(u32 state, unsigned long entry_point); int (*cpu_off)(u32 state); From 4dc09da3c9e2f713474636c19acf4ada815b3504 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 9 Dec 2015 12:44:38 +0000 Subject: [PATCH 08/78] arm64: mm: fold alternatives into .init Currently we treat the alternatives separately from other data that's only used during initialisation, using separate .altinstructions and .altinstr_replacement linker sections. These are freed for general allocation separately from .init*. This is problematic as: * We do not remove execute permissions, as we do for .init, leaving the memory executable. * We pad between them, making the kernel Image bianry up to PAGE_SIZE bytes larger than necessary. This patch moves the two sections into the contiguous region used for .init*. This saves some memory, ensures that we remove execute permissions, and allows us to remove some code made redundant by this reorganisation. Signed-off-by: Mark Rutland Cc: Andre Przywara Cc: Catalin Marinas Cc: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Will Deacon (cherry picked from commit 9aa4ec1571da62366cfddc20f3b923609604fe63) Signed-off-by: Alex Shi --- arch/arm64/include/asm/alternative.h | 1 - arch/arm64/kernel/alternative.c | 6 ------ arch/arm64/kernel/vmlinux.lds.S | 5 ++--- arch/arm64/mm/init.c | 1 - 4 files changed, 2 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index d56ec0715157..e4962f04201e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -19,7 +19,6 @@ struct alt_instr { void __init apply_alternatives_all(void); void apply_alternatives(void *start, size_t length); -void free_alternatives_memory(void); #define ALTINSTR_ENTRY(feature) \ " .word 661b - .\n" /* label */ \ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index ab9db0e9818c..d2ee1b21a10d 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -158,9 +158,3 @@ void apply_alternatives(void *start, size_t length) __apply_alternatives(®ion); } - -void free_alternatives_memory(void) -{ - free_reserved_area(__alt_instructions, __alt_instructions_end, - 0, "alternatives"); -} diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 2fb8560724a4..f04c4762977b 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -145,9 +145,6 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) - . = ALIGN(PAGE_SIZE); - __init_end = .; - . = ALIGN(4); .altinstructions : { __alt_instructions = .; @@ -159,6 +156,8 @@ SECTIONS } . 
= ALIGN(PAGE_SIZE); + __init_end = .; + _data = .; _sdata = .; RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 4cb98aa8c27b..b37e2590d690 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -360,7 +360,6 @@ void free_initmem(void) { fixup_init(); free_initmem_default(0); - free_alternatives_memory(); } #ifdef CONFIG_BLK_DEV_INITRD From f73e4f144708989ee1e154c76a63095b816a9749 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:44:55 +0000 Subject: [PATCH 09/78] asm-generic: Fix local variable shadow in __set_fixmap_offset Currently __set_fixmap_offset is a macro function which has a local variable called 'addr'. If a caller passes a 'phys' parameter which is derived from a variable also called 'addr', the local variable will shadow this, and the compiler will complain about the use of an uninitialized variable. To avoid the issue with namespace clashes, 'addr' is prefixed with a liberal sprinkling of underscores. Turning __set_fixmap_offset into a static inline breaks the build for several architectures. Fixing this properly requires updates to a number of architectures to make them agree on the prototype of __set_fixmap (it could be done as a subsequent patch series). Signed-off-by: Mark Rutland Cc: Arnd Bergmann [catalin.marinas@arm.com: squashed the original function patch and macro fixup] Signed-off-by: Catalin Marinas (cherry picked from commit 3694bd76781b76c4f8d2ecd85018feeb1609f0e5) Signed-off-by: Alex Shi --- include/asm-generic/fixmap.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 1cbb8338edf3..827e4d3bbc7a 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h @@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) #endif /* Return a pointer with offset calculated */ -#define __set_fixmap_offset(idx, phys, flags) \ -({ \ - unsigned long addr; \ - __set_fixmap(idx, phys, flags); \ - addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ - addr; \ +#define __set_fixmap_offset(idx, phys, flags) \ +({ \ + unsigned long ________addr; \ + __set_fixmap(idx, phys, flags); \ + ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \ + ________addr; \ }) #define set_fixmap_offset(idx, phys) \ From 2a83372c6208ae6f562c94166ce221fba9e60534 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 9 Dec 2015 12:44:36 +0000 Subject: [PATCH 10/78] arm64: mm: remove pointless PAGE_MASKing As pgd_offset{,_k} shift the input address by PGDIR_SHIFT, the sub-page bits will always be shifted out. There is no need to apply PAGE_MASK before this. 
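For illustration, paraphrasing the arm64 definitions (not quoting them exactly):

#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset_k(addr)	(swapper_pg_dir + pgd_index(addr))

/* Since PGDIR_SHIFT >= PAGE_SHIFT, (virt & PAGE_MASK) >> PGDIR_SHIFT is
 * identical to virt >> PGDIR_SHIFT: the sub-page bits are shifted out
 * either way, so the masking is dead weight. */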
Signed-off-by: Mark Rutland
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: Jeremy Linton
Cc: Laura Abbott
Cc: Will Deacon
Signed-off-by: Will Deacon
(cherry picked from commit e2c30ee320eb96304896c7ab84499e5bc5e5fb6e)
Signed-off-by: Alex Shi
---
 arch/arm64/mm/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 653735a8c58a..41b62ef82847 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -280,7 +280,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 			&phys, virt);
 		return;
 	}
-	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
 			 size, prot, early_alloc);
 }
 
@@ -301,7 +301,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 
-	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+	return __create_mapping(&init_mm, pgd_offset_k(virt),
 				phys, virt, size, prot, late_alloc);
 }
 

From 0b5c41b49b108eb8ef9448330a42e0fde504a2b6 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 25 Jan 2016 11:44:56 +0000
Subject: [PATCH 11/78] arm64: mm: specialise pagetable allocators

We pass a size parameter to early_alloc and late_alloc, but these are
only ever used to allocate single pages. In late_alloc we always
allocate a single page.

Both allocators provide us with zeroed pages (such that all entries are
invalid), but we have no barriers between allocating a page and adding
that page to existing (live) tables. A concurrent page table walk may
see stale data, leading to a number of issues.

This patch specialises the two allocators for page tables. The size
parameter is removed and the necessary dsb(ishst) is folded into each.
To make it clear that the functions are intended for use for page table
allocation, they are renamed to {early,late}_pgtable_alloc, with the
related function pointer renamed to pgtable_alloc.

As the dsb(ishst) is now in the allocator, the existing barrier for the
zero page is redundant and thus is removed. The previously missing
include of barrier.h is added.
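The ordering requirement that motivates folding dsb(ishst) into the allocators, sketched as pseudocode in a comment; this mirrors the early_pgtable_alloc() hunk below:

/*
 *	ptr = <freshly zeroed page>;
 *	dsb(ishst);		// zeroes visible to the table walker ...
 *	__pmd_populate(pmd, __pa(ptr), PMD_TYPE_TABLE);	// ... before this
 *
 * Without the barrier, a concurrent hardware walk could observe the new
 * table entry while still seeing stale, non-zero contents in the page.
 */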
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 21ab99c289d350f4ae454bc069870009db6df20e) Signed-off-by: Alex Shi --- arch/arm64/mm/mmu.c | 52 +++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 41b62ef82847..f823302e0047 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -62,15 +63,18 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, } EXPORT_SYMBOL(phys_mem_access_prot); -static void __init *early_alloc(unsigned long sz) +static void __init *early_pgtable_alloc(void) { phys_addr_t phys; void *ptr; - phys = memblock_alloc(sz, sz); + phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE); BUG_ON(!phys); ptr = __va(phys); - memset(ptr, 0, sz); + memset(ptr, 0, PAGE_SIZE); + + /* Ensure the zeroed page is visible to the page table walker */ + dsb(ishst); return ptr; } @@ -95,12 +99,12 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) static void alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot, - void *(*alloc)(unsigned long size)) + void *(*pgtable_alloc)(void)) { pte_t *pte; if (pmd_none(*pmd) || pmd_sect(*pmd)) { - pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); + pte = pgtable_alloc(); if (pmd_sect(*pmd)) split_pmd(pmd, pte); __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); @@ -130,7 +134,7 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd) static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - void *(*alloc)(unsigned long size)) + void *(*pgtable_alloc)(void)) { pmd_t *pmd; unsigned long next; @@ -139,7 +143,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, * Check for initial section mappings in the pgd/pud and remove them. 
*/ if (pud_none(*pud) || pud_sect(*pud)) { - pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t)); + pmd = pgtable_alloc(); if (pud_sect(*pud)) { /* * need to have the 1G of mappings continue to be @@ -174,7 +178,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } } else { alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), - prot, alloc); + prot, pgtable_alloc); } phys += next - addr; } while (pmd++, addr = next, addr != end); @@ -195,13 +199,13 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - void *(*alloc)(unsigned long size)) + void *(*pgtable_alloc)(void)) { pud_t *pud; unsigned long next; if (pgd_none(*pgd)) { - pud = alloc(PTRS_PER_PUD * sizeof(pud_t)); + pud = pgtable_alloc(); pgd_populate(mm, pgd, pud); } BUG_ON(pgd_bad(*pgd)); @@ -234,7 +238,8 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, } } } else { - alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc); + alloc_init_pmd(mm, pud, addr, next, phys, prot, + pgtable_alloc); } phys += next - addr; } while (pud++, addr = next, addr != end); @@ -247,7 +252,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot, - void *(*alloc)(unsigned long size)) + void *(*pgtable_alloc)(void)) { unsigned long addr, length, end, next; @@ -257,18 +262,18 @@ static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, end = addr + length; do { next = pgd_addr_end(addr, end); - alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc); + alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc); phys += next - addr; } while (pgd++, addr = next, addr != end); } -static void *late_alloc(unsigned long size) +static void *late_pgtable_alloc(void) { - void *ptr; - - BUG_ON(size > PAGE_SIZE); - ptr = (void *)__get_free_page(PGALLOC_GFP); + void *ptr = (void *)__get_free_page(PGALLOC_GFP); BUG_ON(!ptr); + + /* Ensure the zeroed page is visible to the page table walker */ + dsb(ishst); return ptr; } @@ -281,7 +286,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, return; } __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt, - size, prot, early_alloc); + size, prot, early_pgtable_alloc); } void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, @@ -289,7 +294,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, pgprot_t prot) { __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, - late_alloc); + late_pgtable_alloc); } static void create_mapping_late(phys_addr_t phys, unsigned long virt, @@ -302,7 +307,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, } return __create_mapping(&init_mm, pgd_offset_k(virt), - phys, virt, size, prot, late_alloc); + phys, virt, size, prot, late_pgtable_alloc); } #ifdef CONFIG_DEBUG_RODATA @@ -450,15 +455,12 @@ void __init paging_init(void) fixup_executable(); /* allocate the zero page. */ - zero_page = early_alloc(PAGE_SIZE); + zero_page = early_pgtable_alloc(); bootmem_init(); empty_zero_page = virt_to_page(zero_page); - /* Ensure the zero page is visible to the page table walker */ - dsb(ishst); - /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. 
From 04fcc8d8a52c0f667c54434aa7ad45126ba20ab0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 6 Jan 2016 11:05:27 +0000 Subject: [PATCH 12/78] arm64: head.S: use memset to clear BSS Currently we use an open-coded memzero to clear the BSS. As it is a trivial implementation, it is sub-optimal. Our optimised memset doesn't use the stack, is position-independent, and for the memzero case can use of DC ZVA to clear large blocks efficiently. In __mmap_switched the MMU is on and there are no live caller-saved registers, so we can safely call an uninstrumented memset. This patch changes __mmap_switched to use memset when clearing the BSS. We use the __pi_memset alias so as to avoid any instrumentation in all kernel configurations. Cc: Catalin Marinas Cc: Marc Zyngier Reviewed-by: Ard Biesheuvel Signed-off-by: Mark Rutland Signed-off-by: Will Deacon (cherry picked from commit 2a803c4db615d85126c5c7afd5849a3cfde71422) Signed-off-by: Alex Shi --- arch/arm64/kernel/head.S | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index b685257926f0..4d37c413da4d 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -415,14 +415,13 @@ ENDPROC(__create_page_tables) */ .set initial_sp, init_thread_union + THREAD_START_SP __mmap_switched: - adr_l x6, __bss_start - adr_l x7, __bss_stop + // Clear BSS + adr_l x0, __bss_start + mov x1, xzr + adr_l x2, __bss_stop + sub x2, x2, x0 + bl __pi_memset -1: cmp x6, x7 - b.hs 2f - str xzr, [x6], #8 // Clear BSS - b 1b -2: adr_l sp, initial_sp, x4 str_l x21, __fdt_pointer, x5 // Save FDT pointer str_l x24, memstart_addr, x6 // Save PHYS_OFFSET From 82e12e68cb9f5df9b99ed552202f28934420da5d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:44:57 +0000 Subject: [PATCH 13/78] arm64: mm: place empty_zero_page in bss Currently the zero page is set up in paging_init, and thus we cannot use the zero page earlier. We use the zero page as a reserved TTBR value from which no TLB entries may be allocated (e.g. when uninstalling the idmap). To enable such usage earlier (as may be required for invasive changes to the kernel page tables), and to minimise the time that the idmap is active, we need to be able to use the zero page before paging_init. This patch follows the example set by x86, by allocating the zero page at compile time, in .bss. This means that the zero page itself is available immediately upon entry to start_kernel (as we zero .bss before this), and also means that the zero page takes up no space in the raw Image binary. The associated struct page is allocated in bootmem_init, and remains unavailable until this time. Outside of arch code, the only users of empty_zero_page assume that the empty_zero_page symbol refers to the zeroed memory itself, and that ZERO_PAGE(x) must be used to acquire the associated struct page, following the example of x86. This patch also brings arm64 inline with these assumptions. 
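In short, consumers now go from the zeroed memory to its struct page on demand; a minimal sketch of the new usage:

#include <linux/mm.h>

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

static struct page *example_get_zero_page(void)
{
	/* The symbol is the data itself, available as soon as .bss is
	 * zeroed; the struct page is derived lazily, as on x86. */
	return virt_to_page(empty_zero_page);	/* what ZERO_PAGE(x) expands to */
}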
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 5227cfa71f9e8574373f4d0e9e754942d76cdf67) Signed-off-by: Alex Shi --- arch/arm64/include/asm/mmu_context.h | 2 +- arch/arm64/include/asm/pgtable.h | 4 ++-- arch/arm64/kernel/head.S | 1 + arch/arm64/mm/mmu.c | 9 +-------- 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 24165784b803..600eacb9f7d5 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -48,7 +48,7 @@ static inline void contextidr_thread_switch(struct task_struct *next) */ static inline void cpu_set_reserved_ttbr0(void) { - unsigned long ttbr = page_to_phys(empty_zero_page); + unsigned long ttbr = virt_to_phys(empty_zero_page); asm( " msr ttbr0_el1, %0 // set TTBR0\n" diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 67c2ad6d33b7..a11053af0537 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -123,8 +123,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * ZERO_PAGE is a global shared page that is always zero: used * for zero-mapped memory areas etc.. */ -extern struct page *empty_zero_page; -#define ZERO_PAGE(vaddr) (empty_zero_page) +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 4d37c413da4d..7d095f69bbb6 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -421,6 +421,7 @@ __mmap_switched: adr_l x2, __bss_stop sub x2, x2, x0 bl __pi_memset + dsb ishst // Make zero page visible to PTW adr_l sp, initial_sp, x4 str_l x21, __fdt_pointer, x5 // Save FDT pointer diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f823302e0047..7319529a4a0c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -49,7 +49,7 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS); * Empty_zero_page is a special page that is used for zero-initialized data * and COW. */ -struct page *empty_zero_page; +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, @@ -449,18 +449,11 @@ void fixup_init(void) */ void __init paging_init(void) { - void *zero_page; - map_mem(); fixup_executable(); - /* allocate the zero page. */ - zero_page = early_pgtable_alloc(); - bootmem_init(); - empty_zero_page = virt_to_page(zero_page); - /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. From 530067f9965d8fbc91135613ce7f9cf948d029b4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:44:58 +0000 Subject: [PATCH 14/78] arm64: unify idmap removal We currently open-code the removal of the idmap and restoration of the current task's MMU state in a few places. Before introducing yet more copies of this sequence, unify these to call a new helper, cpu_uninstall_idmap. 
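For reference, the duplicated sequence being factored out, annotated; the real helper in the hunk below takes no argument and operates on current->active_mm:

#include <asm/mmu_context.h>

static inline void example_uninstall_idmap(struct mm_struct *mm)
{
	cpu_set_reserved_ttbr0();	/* TTBR0 -> reserved (zero) tables */
	local_flush_tlb_all();		/* drop any idmap TLB entries */
	cpu_set_default_tcr_t0sz();	/* restore the runtime TCR_EL1.T0SZ */

	if (mm != &init_mm)		/* reinstall the caller's tables */
		cpu_switch_mm(mm->pgd, mm);
}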
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Lorenzo Pieralisi Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 9e8e865bbe294a69666a1996bda3e87825b258c0) Signed-off-by: Alex Shi --- arch/arm64/include/asm/mmu_context.h | 25 +++++++++++++++++++++++++ arch/arm64/kernel/setup.c | 1 + arch/arm64/kernel/smp.c | 4 +--- arch/arm64/kernel/suspend.c | 20 ++++---------------- arch/arm64/mm/mmu.c | 4 +--- 5 files changed, 32 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 600eacb9f7d5..b1b2514d8883 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_PID_IN_CONTEXTIDR static inline void contextidr_thread_switch(struct task_struct *next) @@ -89,6 +90,30 @@ static inline void cpu_set_default_tcr_t0sz(void) : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); } +/* + * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm. + * + * The idmap lives in the same VA range as userspace, but uses global entries + * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from + * speculative TLB fetches, we must temporarily install the reserved page + * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ. + * + * If current is a not a user task, the mm covers the TTBR1_EL1 page tables, + * which should not be installed in TTBR0_EL1. In this case we can leave the + * reserved page tables in place. + */ +static inline void cpu_uninstall_idmap(void) +{ + struct mm_struct *mm = current->active_mm; + + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + cpu_set_default_tcr_t0sz(); + + if (mm != &init_mm) + cpu_switch_mm(mm->pgd, mm); +} + /* * It would be nice to return ASIDs back to the allocator, but unfortunately * that introduces a race with a generation rollover where we could erroneously diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 8119479147db..f6621ba071f9 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -62,6 +62,7 @@ #include #include #include +#include phys_addr_t __fdt_pointer __initdata; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index f3c3d8fee5ba..3df454ff37d5 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -149,9 +149,7 @@ asmlinkage void secondary_start_kernel(void) * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. 
*/ - cpu_set_reserved_ttbr0(); - local_flush_tlb_all(); - cpu_set_default_tcr_t0sz(); + cpu_uninstall_idmap(); preempt_disable(); trace_hardirqs_off(); diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 1095aa483a1c..66055392f445 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) */ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { - struct mm_struct *mm = current->active_mm; int ret; unsigned long flags; @@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ret = __cpu_suspend_enter(arg, fn); if (ret == 0) { /* - * We are resuming from reset with TTBR0_EL1 set to the - * idmap to enable the MMU; set the TTBR0 to the reserved - * page tables to prevent speculative TLB allocations, flush - * the local tlb and set the default tcr_el1.t0sz so that - * the TTBR0 address space set-up is properly restored. - * If the current active_mm != &init_mm we entered cpu_suspend - * with mappings in TTBR0 that must be restored, so we switch - * them back to complete the address space configuration - * restoration before returning. + * We are resuming from reset with the idmap active in TTBR0_EL1. + * We must uninstall the idmap and restore the expected MMU + * state before we can possibly return to userspace. */ - cpu_set_reserved_ttbr0(); - local_flush_tlb_all(); - cpu_set_default_tcr_t0sz(); - - if (mm != &init_mm) - cpu_switch_mm(mm->pgd, mm); + cpu_uninstall_idmap(); /* * Restore per-cpu offset before any kernel diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 7319529a4a0c..74898e5be57a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -458,9 +458,7 @@ void __init paging_init(void) * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. */ - cpu_set_reserved_ttbr0(); - local_flush_tlb_all(); - cpu_set_default_tcr_t0sz(); + cpu_uninstall_idmap(); } /* From 39c5c6a146a72950ba6c04c6dc35aa6eb8830bb5 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:44:59 +0000 Subject: [PATCH 15/78] arm64: unmap idmap earlier During boot we leave the idmap in place until paging_init, as we previously had to wait for the zero page to become allocated and accessible. Now that we have a statically-allocated zero page, we can uninstall the idmap much earlier in the boot process, making it far easier to spot accidental use of physical addresses. This also brings the cold boot path in line with the secondary boot path. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 86ccce896cb0aa800a7a6dcd29b41ffc4eeb1a75) Signed-off-by: Alex Shi --- arch/arm64/kernel/setup.c | 6 ++++++ arch/arm64/mm/mmu.c | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f6621ba071f9..cfed56f0ad26 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -314,6 +314,12 @@ void __init setup_arch(char **cmdline_p) */ local_async_enable(); + /* + * TTBR0 is only used for the identity mapping at this stage. Make it + * point to zero page to avoid speculatively fetching new entries. 
+ */ + cpu_uninstall_idmap(); + efi_init(); arm64_memblock_init(); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 74898e5be57a..1e2ae807130f 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -453,12 +453,6 @@ void __init paging_init(void) fixup_executable(); bootmem_init(); - - /* - * TTBR0 is only used for the identity mapping at this stage. Make it - * point to zero page to avoid speculatively fetching new entries. - */ - cpu_uninstall_idmap(); } /* From b07984a249555dc0c3dca60d6a28d1832cf4a0f2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:00 +0000 Subject: [PATCH 16/78] arm64: add function to install the idmap In some cases (e.g. when making invasive changes to the kernel page tables) we will need to execute code from the idmap. Add a new helper which may be used to install the idmap, complementing the existing cpu_uninstall_idmap. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 609116d202a8c5fd3fe393eb85373cbee906df68) Signed-off-by: Alex Shi --- arch/arm64/include/asm/mmu_context.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index b1b2514d8883..944f2730a940 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -74,7 +74,7 @@ static inline bool __cpu_uses_extended_idmap(void) /* * Set TCR.T0SZ to its default value (based on VA_BITS) */ -static inline void cpu_set_default_tcr_t0sz(void) +static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) { unsigned long tcr; @@ -87,9 +87,12 @@ static inline void cpu_set_default_tcr_t0sz(void) " msr tcr_el1, %0 ;" " isb" : "=&r" (tcr) - : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); + : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); } +#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) +#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz) + /* * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm. * @@ -114,6 +117,15 @@ static inline void cpu_uninstall_idmap(void) cpu_switch_mm(mm->pgd, mm); } +static inline void cpu_install_idmap(void) +{ + cpu_set_reserved_ttbr0(); + local_flush_tlb_all(); + cpu_set_idmap_tcr_t0sz(); + + cpu_switch_mm(idmap_pg_dir, &init_mm); +} + /* * It would be nice to return ASIDs back to the allocator, but unfortunately * that introduces a race with a generation rollover where we could erroneously From 0195e934d13c1afa3bcc7ae933178a8d34c8b02a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 11 Dec 2015 11:04:31 +0000 Subject: [PATCH 17/78] arm64: mm: place __cpu_setup in .text We drop __cpu_setup in .text.init, which ends up being part of .text. The .text.init section was a legacy section name which has been unused elsewhere for a long time. The ".text.init" name is misleading if read as a synonym for ".init.text". Any CPU may execute __cpu_setup before turning the MMU on, so it should simply live in .text. Remove the pointless section assignment. This will leave __cpu_setup in the .text section. 
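A brief note on why the placement matters, in comment form; the reasoning follows the message above, and the .init permission handling refers back to patch 08 in this series:

/*
 * .init.text is freed for general allocation once boot completes, but
 * __cpu_setup runs on every CPU brought up afterwards (e.g. hotplug),
 * with the MMU off. Code on that path must live in a permanently
 * mapped section such as .text.
 */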
Signed-off-by: Mark Rutland
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Will Deacon
(cherry picked from commit f00083cae331e5d3eecade6b4fdc35d0825e73ef)
Signed-off-by: Alex Shi
---
 arch/arm64/mm/proc.S | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 18201e9e8cc7..3a44efb8d495 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -152,8 +152,6 @@ alternative_else
 alternative_endif
 ENDPROC(cpu_do_switch_mm)
 
-	.section ".text.init", #alloc, #execinstr
-
 /*
  *	__cpu_setup
  *

From 03fb711099a3dd20c1d037f866775b2791d3ff6e Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 25 Jan 2016 11:45:00 +0000
Subject: [PATCH 18/78] arm64: mm: add code to safely replace TTBR1_EL1

If page tables are modified without suitable TLB maintenance, the ARM
architecture permits multiple TLB entries to be allocated for the same
VA. When this occurs, it is permitted that TLB conflict aborts are
raised in response to synchronous data/instruction accesses, and/or an
amalgamation of the TLB entries may be used as a result of a TLB
lookup.

The presence of conflicting TLB entries may result in a variety of
behaviours detrimental to the system (e.g. erroneous physical addresses
may be used by I-cache fetches and/or page table walks). Some of these
cases may result in unexpected changes of hardware state, and/or result
in the (asynchronous) delivery of SError.

To avoid these issues, we must avoid situations where conflicting
entries may be allocated into TLBs. For user and module mappings we can
follow a strict break-before-make approach, but this cannot work for
modifications to the swapper page tables that cover the kernel text and
data.

Instead, this patch adds code which is intended to be executed from the
idmap, which can safely unmap the swapper page tables as it only
requires the idmap to be active. This enables us to uninstall the
active TTBR1_EL1 entry, invalidate TLBs, then install a new TTBR1_EL1
entry without potentially unmapping code or data required for the
sequence. This avoids the risk of conflict, but requires that updates
are staged in a copy of the swapper page tables prior to being
installed.

Signed-off-by: Mark Rutland
Reviewed-by: Catalin Marinas
Tested-by: Ard Biesheuvel
Reviewed-by: Ard Biesheuvel
Tested-by: Jeremy Linton
Cc: Laura Abbott
Cc: Will Deacon
Signed-off-by: Catalin Marinas
(cherry picked from commit 50e1881ddde2a986c7d0d2150985239e5e3d7d96)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/mmu_context.h | 19 +++++++++++++++++++
 arch/arm64/mm/proc.S                 | 28 ++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 944f2730a940..a00f7cf35bbd 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -126,6 +126,25 @@ static inline void cpu_install_idmap(void)
 	cpu_switch_mm(idmap_pg_dir, &init_mm);
 }
 
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+	typedef void (ttbr_replace_func)(phys_addr_t);
+	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+	ttbr_replace_func *replace_phys;
+
+	phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+
+	cpu_install_idmap();
+	replace_phys(pgd_phys);
+	cpu_uninstall_idmap();
+}
+
 /*
  * It would be nice to return ASIDs back to the allocator, but unfortunately
  * that introduces a race with a generation rollover where we could erroneously
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 3a44efb8d495..a92738e8b1eb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -152,6 +152,34 @@ alternative_else
 alternative_endif
 ENDPROC(cpu_do_switch_mm)
 
+	.pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+	mrs	x2, daif
+	msr	daifset, #0xf
+
+	adrp	x1, empty_zero_page
+	msr	ttbr1_el1, x1
+	isb
+
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+
+	msr	ttbr1_el1, x0
+	isb
+
+	msr	daif, x2
+
+	ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+	.popsection
+
 /*
  *	__cpu_setup
  *

From b67b1ab417fe73a632583cdd7b710e4450c9d4ec Mon Sep 17 00:00:00 2001
From: Ashok Kumar
Date: Thu, 17 Dec 2015 01:38:32 -0800
Subject: [PATCH 19/78] arm64: Use PoU cache instr for I/D coherency

In systems with three levels of cache (PoU at L1 and PoC at L3), PoC
cache flush instructions flush the L2 and L3 caches, which could affect
performance. For cache flushes for I and D coherency, the PoU should
suffice. So change all I and D coherency related cache flushes to PoU.

Introduce a new __clean_dcache_area_pou API for dcache flushes to the
PoU, and provide a common macro for __flush_dcache_area and
__clean_dcache_area_pou.

Also, in __sync_icache_dcache, icache invalidation for non-aliasing
VIPT icaches is now done only for that particular page instead of the
earlier __flush_icache_all.
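To illustrate the intended split (a sketch only; publish_insns and its
arguments are hypothetical, not part of this patch): making newly
written instructions visible to the I-side only needs a clean to the
PoU, so the cheaper dc cvau path can be used:

	/* Hypothetical helper: publish instructions written to code_buf. */
	static void publish_insns(void *code_buf, size_t len)
	{
		__clean_dcache_area_pou(code_buf, len);	/* dc cvau */
		__flush_icache_all();
	}

A buffer handed to a non-coherent DMA master, by contrast, still needs
__flush_dcache_area() to push the data all the way to the PoC.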
Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Signed-off-by: Ashok Kumar Signed-off-by: Will Deacon (cherry picked from commit 0a28714c53fd4f7aea709be7577dfbe0095c8c3e) Signed-off-by: Alex Shi Conflicts: arch/arm64/mm/proc-macros.S --- arch/arm64/include/asm/cacheflush.h | 1 + arch/arm64/mm/cache.S | 28 ++++++++++++++---------- arch/arm64/mm/flush.c | 33 ++++++++++++++++------------- arch/arm64/mm/proc-macros.S | 21 ++++++++++++++++++ 4 files changed, 57 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 54efedaf331f..7fc294c3bc5b 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -68,6 +68,7 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); +extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); static inline void flush_cache_mm(struct mm_struct *mm) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index cfa44a6adc0a..6df07069a025 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -81,25 +81,31 @@ ENDPROC(__flush_cache_user_range) /* * __flush_dcache_area(kaddr, size) * - * Ensure that the data held in the page kaddr is written back to the - * page in question. + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned and invalidated to the PoC. * * - kaddr - kernel address * - size - size in question */ ENTRY(__flush_dcache_area) - dcache_line_size x2, x3 - add x1, x0, x1 - sub x3, x2, #1 - bic x0, x0, x3 -1: dc civac, x0 // clean & invalidate D line / unified line - add x0, x0, x2 - cmp x0, x1 - b.lo 1b - dsb sy + dcache_by_line_op civac, sy, x0, x1, x2, x3 ret ENDPIPROC(__flush_dcache_area) +/* + * __clean_dcache_area_pou(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned to the PoU. 
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_pou)
+	dcache_by_line_op cvau, ish, x0, x1, x2, x3
+	ret
+ENDPROC(__clean_dcache_area_pou)
+
 /*
  *	__inval_cache_range(start, end)
  *	- start   - start address of region
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index c26b804015e8..46649d6e6c5a 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -34,19 +34,24 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		__flush_icache_all();
 }
 
+static void sync_icache_aliases(void *kaddr, unsigned long len)
+{
+	unsigned long addr = (unsigned long)kaddr;
+
+	if (icache_is_aliasing()) {
+		__clean_dcache_area_pou(kaddr, len);
+		__flush_icache_all();
+	} else {
+		flush_icache_range(addr, addr + len);
+	}
+}
+
 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 				unsigned long uaddr, void *kaddr,
 				unsigned long len)
 {
-	if (vma->vm_flags & VM_EXEC) {
-		unsigned long addr = (unsigned long)kaddr;
-		if (icache_is_aliasing()) {
-			__flush_dcache_area(kaddr, len);
-			__flush_icache_all();
-		} else {
-			flush_icache_range(addr, addr + len);
-		}
-	}
+	if (vma->vm_flags & VM_EXEC)
+		sync_icache_aliases(kaddr, len);
 }
 
 /*
@@ -74,13 +79,11 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 	if (!page_mapping(page))
 		return;
 
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-		__flush_dcache_area(page_address(page),
-				PAGE_SIZE << compound_order(page));
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		sync_icache_aliases(page_address(page),
+				    PAGE_SIZE << compound_order(page));
+	else if (icache_is_aivivt())
 		__flush_icache_all();
-	} else if (icache_is_aivivt()) {
-		__flush_icache_all();
-	}
 }
 
 /*
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index d69dffffaa89..2bce36415c0c 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -74,3 +74,24 @@
 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
 9000:
 	.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ *	op:		operation passed to dc instruction
+ *	domain:		domain used in dsb instruction
+ *	kaddr:		starting virtual address of the region
+ *	size:		size of the region
+ *	Corrupts:	kaddr, size, tmp1, tmp2
+ */
+	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	add	\size, \kaddr, \size
+	sub	\tmp2, \tmp1, #1
+	bic	\kaddr, \kaddr, \tmp2
+9998:	dc	\op, \kaddr
+	add	\kaddr, \kaddr, \tmp1
+	cmp	\kaddr, \size
+	b.lo	9998b
+	dsb	\domain
+	.endm

From 2ddc142e465be4a0f11223e1a882f0317dde6763 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 5 Nov 2015 15:09:17 +0000
Subject: [PATCH 20/78] arm64: Add macros to read/write system registers

Rather than crafting custom macros for reading/writing each system
register, provide generic accessors, read_sysreg and write_sysreg, for
this purpose.
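As an example of the intended usage, config_sctlr_el1 below could be
rewritten with the new accessors instead of hand-rolled inline asm (a
sketch, not part of this patch):

	static inline void config_sctlr_el1(u32 clear, u32 set)
	{
		u32 val = read_sysreg(sctlr_el1);

		val &= ~clear;
		val |= set;
		write_sysreg(val, sctlr_el1);
	}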
Signed-off-by: Mark Rutland
Acked-by: Catalin Marinas
Cc: Suzuki Poulose
Cc: Will Deacon
Signed-off-by: Marc Zyngier
(cherry picked from commit 3600c2fdc09a43a30909743569e35a29121602ed)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/sysreg.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d48ab5b41f52..4aeebec3d882 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include
+
 #include
 
 /*
@@ -208,6 +210,8 @@
 
 #else
 
+#include
+
 asm(
 "	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
 "	.equ	__reg_num_x\\num, \\num\n"
@@ -232,6 +236,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
 	val |= set;
 	asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({					\
+	u64 __val;						\
+	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	__val;							\
+})
+
+#define write_sysreg(v, r) do {					\
+	u64 __val = (u64)v;					\
+	asm volatile("msr " __stringify(r) ", %0"		\
+		     : : "r" (__val));				\
+} while (0)
+
 #endif
 
 #endif /* __ASM_SYSREG_H */

From ac185e4487ba0b3684f511fcacbc63b0359f679b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 1 Dec 2015 13:48:56 +0000
Subject: [PATCH 21/78] KVM: arm/arm64: vgic-v3: Make the LR indexing macro
 public

We store GICv3 LRs in reverse order so that the CPU can save/restore
them in reverse order as well (don't ask why, the design is crazy),
and yet generate memory traffic that doesn't completely suck.

We need this macro to be available to the C version of save/restore.

Signed-off-by: Marc Zyngier
(cherry picked from commit 3c13b8f435acb452eac62d966148a8b6fa92151f)
Signed-off-by: Alex Shi
---
 include/kvm/arm_vgic.h | 6 ++++++
 virt/kvm/arm/vgic-v3.c | 10 ++--------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d2f41477f8ae..13a3d537811b 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -279,6 +279,12 @@ struct vgic_v2_cpu_if {
 	u32		vgic_lr[VGIC_V2_MAX_LRS];
 };
 
+/*
+ * LRs are stored in reverse order in memory. make sure we index them
+ * correctly.
+ */
+#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)
+
 struct vgic_v3_cpu_if {
 #ifdef CONFIG_KVM_ARM_VGIC_V3
 	u32		vgic_hcr;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 487d6357b7e7..3813d23ebb80 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -36,18 +36,12 @@
 #define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
 #define ICH_LR_VIRTUALID_MASK		(BIT_ULL(32) - 1)
 
-/*
- * LRs are stored in reverse order in memory. make sure we index them
- * correctly.
- */ -#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) - static u32 ich_vtr_el2; static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) { struct vgic_lr lr_desc; - u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)]; if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) lr_desc.irq = val & ICH_LR_VIRTUALID_MASK; @@ -111,7 +105,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT; } - vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val; if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); From eb99dec6d1e3b75580d312b1d29a18c39ed1e7ad Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 21 Oct 2015 10:09:49 +0100 Subject: [PATCH 22/78] arm64: KVM: Add a HYP-specific header file In order to expose the various EL2 services that are private to the hypervisor, add a new hyp.h file. So far, it only contains mundane things such as section annotation and VA manipulation. Signed-off-by: Marc Zyngier (cherry picked from commit c76a0a6695c61088c8d2e731e25305502666bf7d) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/hyp.h | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 arch/arm64/kvm/hyp/hyp.h diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h new file mode 100644 index 000000000000..057f483d14c8 --- /dev/null +++ b/arch/arm64/kvm/hyp/hyp.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM64_KVM_HYP_H__ +#define __ARM64_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK) +#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ + + PAGE_OFFSET) + +#endif /* __ARM64_KVM_HYP_H__ */ + From 8b7bc9d277b45dbde20a3c67ce3be136b44fca23 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 15:50:37 +0100 Subject: [PATCH 23/78] arm64: KVM: Implement vgic-v2 save/restore Implement the vgic-v2 save restore (mostly) as a direct translation of the assembly code version. 
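These helpers are meant to bracket the guest run in the world switch,
which a later patch in this series wires up; roughly (a sketch only):

	__vgic_v2_restore_state(vcpu);	/* before entering the guest */
	/* ... run the guest ... */
	__vgic_v2_save_state(vcpu);	/* on exit; also zeroes GICH_HCR */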
Signed-off-by: Marc Zyngier (cherry picked from commit 06282fd2c2bf61619649a2b13e4a08556598a64c) Signed-off-by: Alex Shi --- arch/arm64/kvm/Makefile | 1 + arch/arm64/kvm/hyp/Makefile | 5 ++ arch/arm64/kvm/hyp/hyp.h | 3 ++ arch/arm64/kvm/hyp/vgic-v2-sr.c | 84 +++++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+) create mode 100644 arch/arm64/kvm/hyp/Makefile create mode 100644 arch/arm64/kvm/hyp/vgic-v2-sr.c diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 1949fe5f5424..d31e4e58e961 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -10,6 +10,7 @@ KVM=../../../virt/kvm ARM=../../../arch/arm/kvm obj-$(CONFIG_KVM_ARM_HOST) += kvm.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp/ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile new file mode 100644 index 000000000000..d8d5968fbbed --- /dev/null +++ b/arch/arm64/kvm/hyp/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for Kernel-based Virtual Machine module, HYP part +# + +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 057f483d14c8..ac63553834b9 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -29,5 +29,8 @@ #define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ + PAGE_OFFSET) +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c new file mode 100644 index 000000000000..e71761238cfc --- /dev/null +++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + u32 eisr0, eisr1, elrsr0, elrsr1; + int i, nr_lr; + + if (!base) + return; + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); + cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); + eisr0 = readl_relaxed(base + GICH_EISR0); + elrsr0 = readl_relaxed(base + GICH_ELRSR0); + if (unlikely(nr_lr > 32)) { + eisr1 = readl_relaxed(base + GICH_EISR1); + elrsr1 = readl_relaxed(base + GICH_ELRSR1); + } else { + eisr1 = elrsr1 = 0; + } +#ifdef CONFIG_CPU_BIG_ENDIAN + cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; + cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; +#else + cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; +#endif + cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); + + writel_relaxed(0, base + GICH_HCR); + + for (i = 0; i < nr_lr; i++) + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); +} + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + int i, nr_lr; + + if (!base) + return; + + writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); + writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); + writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + for (i = 0; i < nr_lr; i++) + writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); +} From ac6ec447c62ff314ea9b743447df2f579c614e9d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 16:32:20 +0100 Subject: [PATCH 24/78] arm64: KVM: Implement timer save/restore Implement the timer save restore as a direct translation of the assembly code version. 
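The cntvoff_el2 handling relies on the architectural relation
CNTVCT_EL0 = CNTPCT_EL0 - CNTVOFF_EL2, so installing the per-VM offset
before entry and zeroing it on exit keeps the guest's and the host's
views of virtual time each consistent. As an illustration (an EL2-only
sketch, not part of this patch):

	/* The guest's virtual counter while its cntvoff is installed: */
	u64 guest_vct = read_sysreg(cntpct_el0) - read_sysreg(cntvoff_el2);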
Signed-off-by: Marc Zyngier (cherry picked from commit 1431af367e52b08038e78d346822966d968f1694) Signed-off-by: Alex Shi Conflicts: arch/arm64/kvm/hyp/Makefile arch/arm64/kvm/hyp/hyp.h --- arch/arm64/kvm/hyp/Makefile | 2 + arch/arm64/kvm/hyp/hyp.h | 6 +++ arch/arm64/kvm/hyp/timer-sr.c | 71 ++++++++++++++++++++++++++++ include/clocksource/arm_arch_timer.h | 6 +++ 4 files changed, 85 insertions(+) create mode 100644 arch/arm64/kvm/hyp/timer-sr.c diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index d8d5968fbbed..455dc0a3acb5 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -3,3 +3,5 @@ # obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index ac63553834b9..f213e4652bbf 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -32,5 +32,11 @@ void __vgic_v2_save_state(struct kvm_vcpu *vcpu); void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); +void __vgic_v3_save_state(struct kvm_vcpu *vcpu); +void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c new file mode 100644 index 000000000000..1051e5d7320f --- /dev/null +++ b/arch/arm64/kvm/hyp/timer-sr.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg(cntv_ctl_el0); + timer->cntv_cval = read_sysreg(cntv_cval_el0); + } + + /* Disable the virtual timer */ + write_sysreg(0, cntv_ctl_el0); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); + + /* Clear cntvoff for the host */ + write_sysreg(0, cntvoff_el2); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); + write_sysreg(timer->cntv_cval, cntv_cval_el0); + isb(); + write_sysreg(timer->cntv_ctl, cntv_ctl_el0); + } +} diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 9916d0e4eff5..25d0914481a2 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -23,6 +23,12 @@ #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) +#define CNTHCTL_EL1PCTEN (1 << 0) +#define CNTHCTL_EL1PCEN (1 << 1) +#define CNTHCTL_EVNTEN (1 << 2) +#define CNTHCTL_EVNTDIR (1 << 3) +#define CNTHCTL_EVNTI (0xF << 4) + enum arch_timer_reg { ARCH_TIMER_REG_CTRL, ARCH_TIMER_REG_TVAL, From 89f303c930f031d5ea6a1fa7da3b012e6589fc9a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 18:02:48 +0100 Subject: [PATCH 25/78] arm64: KVM: Implement system register save/restore Implement the system register save/restore as a direct translation of the assembly code version. 
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit 6d6ec20fcf2830ca10c1b7c8efd7e2592c40e3d6) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/hyp.h | 3 ++ arch/arm64/kvm/hyp/sysreg-sr.c | 90 ++++++++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 arch/arm64/kvm/hyp/sysreg-sr.c diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 455dc0a3acb5..ec94200e1e50 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index f213e4652bbf..778d56d41f9b 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -38,5 +38,8 @@ void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c new file mode 100644 index 000000000000..add8fcb6516f --- /dev/null +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include + +#include "hyp.h" + +/* ctxt is already in the HYP VA space */ +void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2); + ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); + ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1); + ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); + ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1); + ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1); + ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1); + ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1); + ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1); + ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1); + ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1); + ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1); + ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1); + ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1); + ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1); + ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); + ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); + ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1); + ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1); + ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); + ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); + + ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); + ctxt->gp_regs.regs.pc = read_sysreg(elr_el2); + ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2); + ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); + ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1); + ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1); +} + +void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); + write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); + write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1); + write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); + write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1); + write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1); + write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1); + write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1); + write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1); + write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1); + write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1); + write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1); + write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1); + write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1); + write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); + write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); + write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1); + write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1); + write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); + + write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); + write_sysreg(ctxt->gp_regs.regs.pc, elr_el2); + write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2); + write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); + write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); + write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); +} From 30876ccefcdb527659231064c355e16cfcb5e577 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 19:28:29 +0100 Subject: [PATCH 26/78] arm64: KVM: Implement 32bit system register save/restore Implement the 32bit system register save/restore as a direct translation of the assembly code version. 
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit c209ec85a2a7d2fd38bca0a44b7e70abd079c178) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/hyp.h | 2 ++ arch/arm64/kvm/hyp/sysreg-sr.c | 47 ++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 778d56d41f9b..bffd30834eb8 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -40,6 +40,8 @@ void __timer_restore_state(struct kvm_vcpu *vcpu); void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void __sysreg32_save_state(struct kvm_vcpu *vcpu); +void __sysreg32_restore_state(struct kvm_vcpu *vcpu); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index add8fcb6516f..eb05afb77ee8 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -88,3 +88,50 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg(ctxt->gp_regs.elr_el1, elr_el1); write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1); } + +void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) +{ + u64 *spsr, *sysreg; + + if (read_sysreg(hcr_el2) & HCR_RW) + return; + + spsr = vcpu->arch.ctxt.gp_regs.spsr; + sysreg = vcpu->arch.ctxt.sys_regs; + + spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt); + spsr[KVM_SPSR_UND] = read_sysreg(spsr_und); + spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq); + spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq); + + sysreg[DACR32_EL2] = read_sysreg(dacr32_el2); + sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2); + + if (!(read_sysreg(cptr_el2) & CPTR_EL2_TFP)) + sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2); +} + +void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) +{ + u64 *spsr, *sysreg; + + if (read_sysreg(hcr_el2) & HCR_RW) + return; + + spsr = vcpu->arch.ctxt.gp_regs.spsr; + sysreg = vcpu->arch.ctxt.sys_regs; + + write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt); + write_sysreg(spsr[KVM_SPSR_UND], spsr_und); + write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq); + write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq); + + write_sysreg(sysreg[DACR32_EL2], dacr32_el2); + write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); +} From b45242cdcdef74702189a2a5b6864e355df96a2f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 21:02:46 +0100 Subject: [PATCH 27/78] arm64: KVM: Implement debug save/restore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement the debug save restore as a direct translation of the assembly code version. 
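One subtlety worth spelling out: the BRPs/WRPs fields of
ID_AA64DFR0_EL1 encode the number of breakpoint/watchpoint registers
minus one, which is why the save_debug/restore_debug switch statements
below deliberately fall through from case N all the way down to
register 0. A worked example (illustrative only):

	u64 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
	int brps = (aa64dfr0 >> 12) & 0xf;	/* 5 on a 6-breakpoint CPU */

	/* save_debug(ptr, dbgbcr, 5) then copies dbgbcr5_el1..dbgbcr0_el1 */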
Signed-off-by: Marc Zyngier Tested-by: Alex Bennée Reviewed-by: Alex Bennée Reviewed-by: Christoffer Dall (cherry picked from commit 8eb992674c9e69d57af199f36b6455dbc00ac9f9) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/debug-sr.c | 137 ++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp.h | 9 +++ 3 files changed, 147 insertions(+) create mode 100644 arch/arm64/kvm/hyp/debug-sr.c diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index ec94200e1e50..ec14cacc21a6 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c new file mode 100644 index 000000000000..7848322deed6 --- /dev/null +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +#include + +#include "hyp.h" + +#define read_debug(r,n) read_sysreg(r##n##_el1) +#define write_debug(v,r,n) write_sysreg(v, r##n##_el1) + +#define save_debug(ptr,reg,nr) \ + switch (nr) { \ + case 15: ptr[15] = read_debug(reg, 15); \ + case 14: ptr[14] = read_debug(reg, 14); \ + case 13: ptr[13] = read_debug(reg, 13); \ + case 12: ptr[12] = read_debug(reg, 12); \ + case 11: ptr[11] = read_debug(reg, 11); \ + case 10: ptr[10] = read_debug(reg, 10); \ + case 9: ptr[9] = read_debug(reg, 9); \ + case 8: ptr[8] = read_debug(reg, 8); \ + case 7: ptr[7] = read_debug(reg, 7); \ + case 6: ptr[6] = read_debug(reg, 6); \ + case 5: ptr[5] = read_debug(reg, 5); \ + case 4: ptr[4] = read_debug(reg, 4); \ + case 3: ptr[3] = read_debug(reg, 3); \ + case 2: ptr[2] = read_debug(reg, 2); \ + case 1: ptr[1] = read_debug(reg, 1); \ + default: ptr[0] = read_debug(reg, 0); \ + } + +#define restore_debug(ptr,reg,nr) \ + switch (nr) { \ + case 15: write_debug(ptr[15], reg, 15); \ + case 14: write_debug(ptr[14], reg, 14); \ + case 13: write_debug(ptr[13], reg, 13); \ + case 12: write_debug(ptr[12], reg, 12); \ + case 11: write_debug(ptr[11], reg, 11); \ + case 10: write_debug(ptr[10], reg, 10); \ + case 9: write_debug(ptr[9], reg, 9); \ + case 8: write_debug(ptr[8], reg, 8); \ + case 7: write_debug(ptr[7], reg, 7); \ + case 6: write_debug(ptr[6], reg, 6); \ + case 5: write_debug(ptr[5], reg, 5); \ + case 4: write_debug(ptr[4], reg, 4); \ + case 3: write_debug(ptr[3], reg, 3); \ + case 2: write_debug(ptr[2], reg, 2); \ + case 1: write_debug(ptr[1], reg, 1); \ + default: write_debug(ptr[0], reg, 0); \ + } + +void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt) +{ + u64 aa64dfr0; + int brps, wrps; + + if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + return; + + aa64dfr0 = 
read_sysreg(id_aa64dfr0_el1); + brps = (aa64dfr0 >> 12) & 0xf; + wrps = (aa64dfr0 >> 20) & 0xf; + + save_debug(dbg->dbg_bcr, dbgbcr, brps); + save_debug(dbg->dbg_bvr, dbgbvr, brps); + save_debug(dbg->dbg_wcr, dbgwcr, wrps); + save_debug(dbg->dbg_wvr, dbgwvr, wrps); + + ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1); +} + +void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt) +{ + u64 aa64dfr0; + int brps, wrps; + + if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + return; + + aa64dfr0 = read_sysreg(id_aa64dfr0_el1); + + brps = (aa64dfr0 >> 12) & 0xf; + wrps = (aa64dfr0 >> 20) & 0xf; + + restore_debug(dbg->dbg_bcr, dbgbcr, brps); + restore_debug(dbg->dbg_bvr, dbgbvr, brps); + restore_debug(dbg->dbg_wcr, dbgwcr, wrps); + restore_debug(dbg->dbg_wvr, dbgwvr, wrps); + + write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); +} + +void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu) +{ + /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform + * a full save/restore cycle. */ + if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) || + (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE)) + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + + __debug_save_state(vcpu, &vcpu->arch.host_debug_state, + kern_hyp_va(vcpu->arch.host_cpu_context)); +} + +void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu) +{ + __debug_restore_state(vcpu, &vcpu->arch.host_debug_state, + kern_hyp_va(vcpu->arch.host_cpu_context)); + + if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; +} + +u32 __hyp_text __debug_read_mdcr_el2(void) +{ + return read_sysreg(mdcr_el2); +} diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index bffd30834eb8..454e46f9c15e 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -43,5 +43,14 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt); void __sysreg32_save_state(struct kvm_vcpu *vcpu); void __sysreg32_restore_state(struct kvm_vcpu *vcpu); +void __debug_save_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_restore_state(struct kvm_vcpu *vcpu, + struct kvm_guest_debug_arch *dbg, + struct kvm_cpu_context *ctxt); +void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); +void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HYP_H__ */ From 7b9a94b384beb4665876848afa0bb0e026a03e99 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 22 Oct 2015 08:32:18 +0100 Subject: [PATCH 28/78] arm64: KVM: Implement guest entry Contrary to the previous patch, the guest entry is fairly different from its assembly counterpart, mostly because it is only concerned with saving/restoring the GP registers, and nothing else. 
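The C side of the contract is the __guest_enter declaration added to
hyp.h below: the world switch (added in a later patch) calls it with
the vcpu and the host context, and whatever value __guest_exit is later
entered with in x1 becomes __guest_enter's return value. Roughly:

	u64 exit_code = __guest_enter(vcpu, host_ctxt);
	/* exit_code is the x1 value that was passed to __guest_exit */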
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit b97b66c14b96ab562e4fd516d804c5cd05c0529e) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/entry.S | 130 ++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp.h | 2 + 3 files changed, 133 insertions(+) create mode 100644 arch/arm64/kvm/hyp/entry.S diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index ec14cacc21a6..1e1ff06348cd 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S new file mode 100644 index 000000000000..ff196951e63b --- /dev/null +++ b/arch/arm64/kvm/hyp/entry.S @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) +#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) + + .text + .pushsection .hyp.text, "ax" + +.macro save_callee_saved_regs ctxt + stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +.macro restore_callee_saved_regs ctxt + ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)] + ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)] + ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)] + ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)] + ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)] + ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)] +.endm + +/* + * u64 __guest_enter(struct kvm_vcpu *vcpu, + * struct kvm_cpu_context *host_ctxt); + */ +ENTRY(__guest_enter) + // x0: vcpu + // x1: host/guest context + // x2-x18: clobbered by macros + + // Store the host regs + save_callee_saved_regs x1 + + // Preserve vcpu & host_ctxt for use at exit time + stp x0, x1, [sp, #-16]! + + add x1, x0, #VCPU_CONTEXT + + // Prepare x0-x1 for later restore by pushing them onto the stack + ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)] + stp x2, x3, [sp, #-16]! + + // x2-x18 + ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)] + ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)] + ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)] + ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)] + ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)] + ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)] + ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)] + ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)] + ldr x18, [x1, #CPU_XREG_OFFSET(18)] + + // x19-x29, lr + restore_callee_saved_regs x1 + + // Last bits of the 64bit state + ldp x0, x1, [sp], #16 + + // Do not touch any register after this! 
+ eret +ENDPROC(__guest_enter) + +ENTRY(__guest_exit) + // x0: vcpu + // x1: return code + // x2-x3: free + // x4-x29,lr: vcpu regs + // vcpu x0-x3 on the stack + + add x2, x0, #VCPU_CONTEXT + + stp x4, x5, [x2, #CPU_XREG_OFFSET(4)] + stp x6, x7, [x2, #CPU_XREG_OFFSET(6)] + stp x8, x9, [x2, #CPU_XREG_OFFSET(8)] + stp x10, x11, [x2, #CPU_XREG_OFFSET(10)] + stp x12, x13, [x2, #CPU_XREG_OFFSET(12)] + stp x14, x15, [x2, #CPU_XREG_OFFSET(14)] + stp x16, x17, [x2, #CPU_XREG_OFFSET(16)] + str x18, [x2, #CPU_XREG_OFFSET(18)] + + ldp x6, x7, [sp], #16 // x2, x3 + ldp x4, x5, [sp], #16 // x0, x1 + + stp x4, x5, [x2, #CPU_XREG_OFFSET(0)] + stp x6, x7, [x2, #CPU_XREG_OFFSET(2)] + + save_callee_saved_regs x2 + + // Restore vcpu & host_ctxt from the stack + // (preserving return code in x1) + ldp x0, x2, [sp], #16 + // Now restore the host regs + restore_callee_saved_regs x2 + + mov x0, x1 + ret +ENDPROC(__guest_exit) + + /* Insert fault handling here */ diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 454e46f9c15e..080965348def 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -52,5 +52,7 @@ void __debug_restore_state(struct kvm_vcpu *vcpu, void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); +u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); + #endif /* __ARM64_KVM_HYP_H__ */ From 6552c0046751e0da63df9799060a82b1012c09b8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 28 Oct 2015 08:45:37 +0000 Subject: [PATCH 29/78] arm64: KVM: Add patchable function selector KVM so far relies on code patching, and is likely to use it more in the future. The main issue is that our alternative system works at the instruction level, while we'd like to have alternatives at the function level. In order to cope with this, add the "hyp_alternate_select" macro that outputs a brief sequence of code that in turn can be patched, allowing an alternative function to be selected. Signed-off-by: Marc Zyngier (cherry picked from commit c1bf6e18e97e7ead77371d4251f8ef1567455584) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/hyp.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 080965348def..73419a769112 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -29,6 +29,30 @@ #define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \ + PAGE_OFFSET) +/** + * hyp_alternate_select - Generates patchable code sequences that are + * used to switch between two implementations of a function, depending + * on the availability of a feature. 
+ * + * @fname: a symbol name that will be defined as a function returning a + * function pointer whose type will match @orig and @alt + * @orig: A pointer to the default function, as returned by @fname when + * @cond doesn't hold + * @alt: A pointer to the alternate function, as returned by @fname + * when @cond holds + * @cond: a CPU feature (as described in asm/cpufeature.h) + */ +#define hyp_alternate_select(fname, orig, alt, cond) \ +typeof(orig) * __hyp_text fname(void) \ +{ \ + typeof(alt) *val = orig; \ + asm volatile(ALTERNATIVE("nop \n", \ + "mov %0, %1 \n", \ + cond) \ + : "+r" (val) : "r" (alt)); \ + return val; \ +} + void __vgic_v2_save_state(struct kvm_vcpu *vcpu); void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); From 1980361d468b1245235abf9240962d1a766a9c2d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 21 Oct 2015 09:57:10 +0100 Subject: [PATCH 30/78] arm64: KVM: Implement the core world switch Implement the core of the world switch in C. Not everything is there yet, and there is nothing to re-enter the world switch either. But this already outlines the code structure well enough. Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit be901e9b15cd2c8e48dc089b4655ea4a076e66fd) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/switch.c | 135 ++++++++++++++++++++++++++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 arch/arm64/kvm/hyp/switch.c diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 1e1ff06348cd..9c11b0fe1646 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += switch.o diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c new file mode 100644 index 000000000000..79f59c98b148 --- /dev/null +++ b/arch/arm64/kvm/hyp/switch.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) +{ + u64 val; + + /* + * We are about to set CPTR_EL2.TFP to trap all floating point + * register accesses to EL2, however, the ARM ARM clearly states that + * traps are only taken to EL2 if the operation would not otherwise + * trap to EL1. Therefore, always make sure that for 32-bit guests, + * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. 
+ */ + val = vcpu->arch.hcr_el2; + if (!(val & HCR_RW)) { + write_sysreg(1 << 30, fpexc32_el2); + isb(); + } + write_sysreg(val, hcr_el2); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ + write_sysreg(1 << 15, hstr_el2); + write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); +} + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + write_sysreg(HCR_RW, hcr_el2); + write_sysreg(0, hstr_el2); + write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); + write_sysreg(0, cptr_el2); +} + +static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); +} + +static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) +{ + write_sysreg(0, vttbr_el2); +} + +static hyp_alternate_select(__vgic_call_save_state, + __vgic_v2_save_state, __vgic_v3_save_state, + ARM64_HAS_SYSREG_GIC_CPUIF); + +static hyp_alternate_select(__vgic_call_restore_state, + __vgic_v2_restore_state, __vgic_v3_restore_state, + ARM64_HAS_SYSREG_GIC_CPUIF); + +static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) +{ + __vgic_call_save_state()(vcpu); + write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2); +} + +static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) +{ + u64 val; + + val = read_sysreg(hcr_el2); + val |= HCR_INT_OVERRIDE; + val |= vcpu->arch.irq_lines; + write_sysreg(val, hcr_el2); + + __vgic_call_restore_state()(vcpu); +} + +int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_cpu_context *guest_ctxt; + u64 exit_code; + + vcpu = kern_hyp_va(vcpu); + write_sysreg(vcpu, tpidr_el2); + + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + guest_ctxt = &vcpu->arch.ctxt; + + __sysreg_save_state(host_ctxt); + __debug_cond_save_host_state(vcpu); + + __activate_traps(vcpu); + __activate_vm(vcpu); + + __vgic_restore_state(vcpu); + __timer_restore_state(vcpu); + + /* + * We must restore the 32-bit state before the sysregs, thanks + * to Cortex-A57 erratum #852523. + */ + __sysreg32_restore_state(vcpu); + __sysreg_restore_state(guest_ctxt); + __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + + /* Jump in the fire! */ + exit_code = __guest_enter(vcpu, host_ctxt); + /* And we're baaack! */ + + __sysreg_save_state(guest_ctxt); + __sysreg32_save_state(vcpu); + __timer_save_state(vcpu); + __vgic_save_state(vcpu); + + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + + __sysreg_restore_state(host_ctxt); + + __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + __debug_cond_restore_host_state(vcpu); + + return exit_code; +} From 3ff56990bcd69f2b80e8cbff3cb49a544971f3d5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Oct 2015 08:34:09 +0000 Subject: [PATCH 31/78] arm64: KVM: Implement fpsimd save/restore Implement the fpsimd save restore, keeping the lazy part in assembler (as returning to C would be overkill). 
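Note that CPTR_EL2.TFP doubles as an ownership flag: it is set on every
guest entry and only cleared by the __fpsimd_guest_restore trap handler
added below, so on exit __fpsimd_enabled() tells the world switch
whether the guest ever touched the FP/SIMD registers. That is what the
switch.c hunk in this patch implements:

	bool fp_enabled = __fpsimd_enabled();	/* TFP clear: guest owns FP */

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}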
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit c13d1683df16db16c91372177ca10c31677b5ed5) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/entry.S | 32 +++++++++++++++++++++++++++++++- arch/arm64/kvm/hyp/fpsimd.S | 33 +++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp.h | 7 +++++++ arch/arm64/kvm/hyp/switch.c | 8 ++++++++ arch/arm64/kvm/hyp/sysreg-sr.c | 2 +- 6 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/kvm/hyp/fpsimd.S diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 9c11b0fe1646..56238d08d36f 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index ff196951e63b..90cbf0fee88a 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -27,6 +27,7 @@ #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) +#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) .text .pushsection .hyp.text, "ax" @@ -127,4 +128,33 @@ ENTRY(__guest_exit) ret ENDPROC(__guest_exit) - /* Insert fault handling here */ +ENTRY(__fpsimd_guest_restore) + stp x4, lr, [sp, #-16]! + + mrs x2, cptr_el2 + bic x2, x2, #CPTR_EL2_TFP + msr cptr_el2, x2 + isb + + mrs x3, tpidr_el2 + + ldr x0, [x3, #VCPU_HOST_CONTEXT] + kern_hyp_va x0 + add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + bl __fpsimd_save_state + + add x2, x3, #VCPU_CONTEXT + add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + bl __fpsimd_restore_state + + mrs x1, hcr_el2 + tbnz x1, #HCR_RW_SHIFT, 1f + ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)] + msr fpexc32_el2, x4 +1: + ldp x4, lr, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 + + eret +ENDPROC(__fpsimd_guest_restore) diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S new file mode 100644 index 000000000000..da3f22c7f14a --- /dev/null +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include + +#include + + .text + .pushsection .hyp.text, "ax" + +ENTRY(__fpsimd_save_state) + fpsimd_save x0, 1 + ret +ENDPROC(__fpsimd_save_state) + +ENTRY(__fpsimd_restore_state) + fpsimd_restore x0, 1 + ret +ENDPROC(__fpsimd_restore_state) diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 73419a769112..70d4f696c862 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -76,6 +76,13 @@ void __debug_restore_state(struct kvm_vcpu *vcpu, void __debug_cond_save_host_state(struct kvm_vcpu *vcpu); void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu); +void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); +void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +static inline bool __fpsimd_enabled(void) +{ + return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); +} + u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 79f59c98b148..608155f5b856 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -89,6 +89,7 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; + bool fp_enabled; u64 exit_code; vcpu = kern_hyp_va(vcpu); @@ -118,6 +119,8 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) exit_code = __guest_enter(vcpu, host_ctxt); /* And we're baaack! */ + fp_enabled = __fpsimd_enabled(); + __sysreg_save_state(guest_ctxt); __sysreg32_save_state(vcpu); __timer_save_state(vcpu); @@ -128,6 +131,11 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) __sysreg_restore_state(host_ctxt); + if (fp_enabled) { + __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs); + __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs); + } + __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); __debug_cond_restore_host_state(vcpu); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index eb05afb77ee8..36035417ec52 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -107,7 +107,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) sysreg[DACR32_EL2] = read_sysreg(dacr32_el2); sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2); - if (!(read_sysreg(cptr_el2) & CPTR_EL2_TFP)) + if (__fpsimd_enabled()) sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2); if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) From bf7432f754ea2cbb5c58cddea7e6322f05014e75 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 23 Oct 2015 08:26:37 +0100 Subject: [PATCH 32/78] arm64: KVM: Implement TLB handling Implement the TLB handling as a direct translation of the assembly code version. 
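A note on the "ipa >>= 12" below: TLBI IPAS2E1IS takes the intermediate
physical address in page units (IPA[47:12] in the low bits of Xt), so
for example (illustrative value only):

	phys_addr_t ipa = 0x80000000UL;		/* IPA of the stale page */
	/* the sequence below hands 0x80000 to the TLBI */
	asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa >> 12));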
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit 5eec0a91e32a2862e86265532ae773820e0afd77) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/entry.S | 1 + arch/arm64/kvm/hyp/tlb.c | 73 +++++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+) create mode 100644 arch/arm64/kvm/hyp/tlb.c diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 56238d08d36f..1a529f5a4922 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o +obj-$(CONFIG_KVM_ARM_HOST) += tlb.o diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 90cbf0fee88a..1050b2b09904 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -147,6 +147,7 @@ ENTRY(__fpsimd_guest_restore) add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) bl __fpsimd_restore_state + // Skip restoring fpexc32 for AArch64 guests mrs x1, hcr_el2 tbnz x1, #HCR_RW_SHIFT, 1f ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)] diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c new file mode 100644 index 000000000000..6fcb93a03659 --- /dev/null +++ b/arch/arm64/kvm/hyp/tlb.c @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); + + /* + * We could do so much better if we had the VA as well. + * Instead, we invalidate Stage-2 for this IPA, and the + * whole of Stage-1. Weep... + */ + ipa >>= 12; + asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa)); + + /* + * We have to ensure completion of the invalidation at Stage-2, + * since a table walk on another CPU could refill a TLB with a + * complete (S1 + S2) walk based on the old Stage-2 mapping if + * the Stage-1 invalidation happened first. + */ + dsb(ish); + asm volatile("tlbi vmalle1is" : : ); + dsb(ish); + isb(); + + write_sysreg(0, vttbr_el2); +} + +void __hyp_text __tlb_flush_vmid(struct kvm *kvm) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); + + asm volatile("tlbi vmalls12e1is" : : ); + dsb(ish); + isb(); + + write_sysreg(0, vttbr_el2); +} + +void __hyp_text __tlb_flush_vm_context(void) +{ + dsb(ishst); + asm volatile("tlbi alle1is \n" + "ic ialluis ": : ); + dsb(ish); +} From bdfc3d08944d92a15b4a94290586f87d5a309434 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 08:01:56 +0000 Subject: [PATCH 33/78] arm64: KVM: HYP mode entry points Add the entry points for HYP mode (both for hypercalls and exception handling). 
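For orientation, here is the hypercall convention implemented by el1_sync below, modelled in C. This is an illustration only; kern_hyp_va() and read_sysreg() are the helpers already provided by hyp.h, and the real logic is the assembly in the diff:

	/*
	 * x0 == 0 is the special __hyp_get_vectors query; anything else
	 * is a kernel-VA function pointer, called at EL2 with x1-x3
	 * shuffled down as its arguments.
	 */
	static unsigned long handle_host_hvc(unsigned long x0, unsigned long x1,
					     unsigned long x2, unsigned long x3)
	{
		typedef unsigned long (*hyp_fn_t)(unsigned long, unsigned long,
						  unsigned long);

		if (!x0)
			return read_sysreg(vbar_el2);

		return ((hyp_fn_t)kern_hyp_va(x0))(x1, x2, x3);
	}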
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit 2b28162cf65a6fe1c93d172675e4f2792792f17e) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/Makefile | 1 + arch/arm64/kvm/hyp/hyp-entry.S | 203 +++++++++++++++++++++++++++++++++ 2 files changed, 204 insertions(+) create mode 100644 arch/arm64/kvm/hyp/hyp-entry.S diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 1a529f5a4922..826032bc3945 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S new file mode 100644 index 000000000000..818731a5f61c --- /dev/null +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + + .text + .pushsection .hyp.text, "ax" + +.macro save_x0_to_x3 + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! +.endm + +.macro restore_x0_to_x3 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + +el1_sync: // Guest trapped into EL2 + save_x0_to_x3 + + mrs x1, esr_el2 + lsr x2, x1, #ESR_ELx_EC_SHIFT + + cmp x2, #ESR_ELx_EC_HVC64 + b.ne el1_trap + + mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest + cbnz x3, el1_trap // called HVC + + /* Here, we're pretty sure the host called HVC. */ + restore_x0_to_x3 + + /* Check for __hyp_get_vectors */ + cbnz x0, 1f + mrs x0, vbar_el2 + b 2f + +1: stp lr, xzr, [sp, #-16]! + + /* + * Compute the function address in EL2, and shuffle the parameters. + */ + kern_hyp_va x0 + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + + ldp lr, xzr, [sp], #16 +2: eret + +el1_trap: + /* + * x1: ESR + * x2: ESR_EC + */ + + /* Guest accessed VFP/SIMD registers, save host, restore Guest */ + cmp x2, #ESR_ELx_EC_FP_ASIMD + b.eq __fpsimd_guest_restore + + cmp x2, #ESR_ELx_EC_DABT_LOW + mov x0, #ESR_ELx_EC_IABT_LOW + ccmp x2, x0, #4, ne + b.ne 1f // Not an abort we care about + + /* This is an abort. Check for permission fault */ +alternative_if_not ARM64_WORKAROUND_834220 + and x2, x1, #ESR_ELx_FSC_TYPE + cmp x2, #FSC_PERM + b.ne 1f // Not a permission fault +alternative_else + nop // Use the permission fault path to + nop // check for a valid S1 translation, + nop // regardless of the ESR value. +alternative_endif + + /* + * Check for Stage-1 page table walk, which is guaranteed + * to give a valid HPFAR_EL2. + */ + tbnz x1, #7, 1f // S1PTW is set + + /* Preserve PAR_EL1 */ + mrs x3, par_el1 + stp x3, xzr, [sp, #-16]! + + /* + * Permission fault, HPFAR_EL2 is invalid. + * Resolve the IPA the hard way using the guest VA. + * Stage-1 translation already validated the memory access rights. 
+ * As such, we can use the EL1 translation regime, and don't have + * to distinguish between EL0 and EL1 access. + */ + mrs x2, far_el2 + at s1e1r, x2 + isb + + /* Read result */ + mrs x3, par_el1 + ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack + msr par_el1, x0 + tbnz x3, #0, 3f // Bail out if we failed the translation + ubfx x3, x3, #12, #36 // Extract IPA + lsl x3, x3, #4 // and present it like HPFAR + b 2f + +1: mrs x3, hpfar_el2 + mrs x2, far_el2 + +2: mrs x0, tpidr_el2 + str w1, [x0, #VCPU_ESR_EL2] + str x2, [x0, #VCPU_FAR_EL2] + str x3, [x0, #VCPU_HPFAR_EL2] + + mov x1, #ARM_EXCEPTION_TRAP + b __guest_exit + + /* + * Translation failed. Just return to the guest and + * let it fault again. Another CPU is probably playing + * behind our back. + */ +3: restore_x0_to_x3 + + eret + +el1_irq: + save_x0_to_x3 + mrs x0, tpidr_el2 + mov x1, #ARM_EXCEPTION_IRQ + b __guest_exit + +.macro invalid_vector label, target = __kvm_hyp_panic + .align 2 +\label: + b \target +ENDPROC(\label) +.endm + + /* None of these should ever happen */ + invalid_vector el2t_sync_invalid + invalid_vector el2t_irq_invalid + invalid_vector el2t_fiq_invalid + invalid_vector el2t_error_invalid + invalid_vector el2h_sync_invalid + invalid_vector el2h_irq_invalid + invalid_vector el2h_fiq_invalid + invalid_vector el2h_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + + .ltorg + + .align 11 + +ENTRY(__hyp_vector) + ventry el2t_sync_invalid // Synchronous EL2t + ventry el2t_irq_invalid // IRQ EL2t + ventry el2t_fiq_invalid // FIQ EL2t + ventry el2t_error_invalid // Error EL2t + + ventry el2h_sync_invalid // Synchronous EL2h + ventry el2h_irq_invalid // IRQ EL2h + ventry el2h_fiq_invalid // FIQ EL2h + ventry el2h_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync // Synchronous 32-bit EL1 + ventry el1_irq // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +ENDPROC(__hyp_vector) From f82b2dffa97f7598a81034ebdbcf2fcd689dab00 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 15:21:52 +0000 Subject: [PATCH 34/78] arm64: KVM: Add panic handling Add the panic handler, together with the small bits of assembly code to call the kernel's panic implementation. 
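One subtlety: __hyp_panic (added below) hands the format string to panic() through hyp_kern_va(), because panic() runs back at EL1 with the kernel's page tables while the string's address was taken under the HYP mapping. A minimal sketch of that conversion, assuming the constant offset between the two mappings used elsewhere in this code:

	/* Illustration only; the real macro in hyp.h also preserves the type. */
	#define hyp_kern_va(v) \
		((unsigned long)(v) - HYP_PAGE_OFFSET + PAGE_OFFSET)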
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall (cherry picked from commit 53fd5b6487e4438049a5da5e36dfb8edcf1fd789) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/hyp-entry.S | 11 ++++++++++- arch/arm64/kvm/hyp/hyp.h | 1 + arch/arm64/kvm/hyp/switch.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 818731a5f61c..8e58a3ba6139 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -155,7 +155,16 @@ el1_irq: mov x1, #ARM_EXCEPTION_IRQ b __guest_exit -.macro invalid_vector label, target = __kvm_hyp_panic +ENTRY(__hyp_do_panic) + mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ + PSR_MODE_EL1h) + msr spsr_el2, lr + ldr lr, =panic + msr elr_el2, lr + eret +ENDPROC(__hyp_do_panic) + +.macro invalid_vector label, target = __hyp_panic .align 2 \label: b \target diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h index 70d4f696c862..fb275178b6af 100644 --- a/arch/arm64/kvm/hyp/hyp.h +++ b/arch/arm64/kvm/hyp/hyp.h @@ -84,6 +84,7 @@ static inline bool __fpsimd_enabled(void) } u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); +void __noreturn __hyp_do_panic(unsigned long, ...); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 608155f5b856..b012870a92e7 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -141,3 +141,33 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) return exit_code; } + +static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; + +void __hyp_text __noreturn __hyp_panic(void) +{ + unsigned long str_va = (unsigned long)__hyp_panic_string; + u64 spsr = read_sysreg(spsr_el2); + u64 elr = read_sysreg(elr_el2); + u64 par = read_sysreg(par_el1); + + if (read_sysreg(vttbr_el2)) { + struct kvm_vcpu *vcpu; + struct kvm_cpu_context *host_ctxt; + + vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + __sysreg_restore_state(host_ctxt); + } + + /* Call panic for real */ + __hyp_do_panic(hyp_kern_va(str_va), + spsr, elr, + read_sysreg(esr_el2), read_sysreg(far_el2), + read_sysreg(hpfar_el2), par, + (void *)read_sysreg(tpidr_el2)); + + unreachable(); +} From 83f0b0bce9df6515279c43f98986e58d9838b933 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 19 Oct 2015 15:50:58 +0100 Subject: [PATCH 35/78] arm64: KVM: Implement vgic-v3 save/restore Implement the vgic-v3 save restore as a direct translation of the assembly code version. 
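The C version replaces the assembly's computed branch into a run of save instructions with switch statements that deliberately fall through: entering at the highest implemented index touches exactly the registers the hardware reports in ICH_VTR_EL2, and nothing more. A stripped-down illustration of the idiom, using a hypothetical save_lr() helper and only four list registers:

	static void save_lr(int n);	/* hypothetical helper */

	static void save_lrs(int max_lr_idx)
	{
		switch (max_lr_idx) {	/* no breaks: fall-through is the point */
		case 3: save_lr(3);
		case 2: save_lr(2);
		case 1: save_lr(1);
		case 0: save_lr(0);
		}
	}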
Signed-off-by: Marc Zyngier (cherry picked from commit f68d2b1b73cc3d8f6eb189c11ce79a472ed27c42) Signed-off-by: Alex Shi Conflicts: arch/arm64/kvm/hyp/Makefile arch/arm64/kvm/hyp/hyp.h --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 226 ++++++++++++++++++++++++++++++++ 1 file changed, 226 insertions(+) create mode 100644 arch/arm64/kvm/hyp/vgic-v3-sr.c diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c new file mode 100644 index 000000000000..78d05f3cccfc --- /dev/null +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "hyp.h" + +#define vtr_to_max_lr_idx(v) ((v) & 0xf) +#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1) + +#define read_gicreg(r) \ + ({ \ + u64 reg; \ + asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \ + reg; \ + }) + +#define write_gicreg(v,r) \ + do { \ + u64 __val = (v); \ + asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\ + } while (0) + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u64 val; + u32 max_lr_idx, nr_pri_bits; + + /* + * Make sure stores to the GIC via the memory mapped interface + * are now visible to the system register interface. 
+ */ + dsb(st); + + cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); + cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2); + cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2); + cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2); + + write_gicreg(0, ICH_HCR_EL2); + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); + + switch (max_lr_idx) { + case 15: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2); + case 14: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2); + case 13: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2); + case 12: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2); + case 11: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2); + case 10: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2); + case 9: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2); + case 8: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2); + case 7: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2); + case 6: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2); + case 5: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2); + case 4: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2); + case 3: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2); + case 2: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2); + case 1: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2); + case 0: + cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2); + cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2); + case 6: + cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2); + default: + cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2); + cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2); + case 6: + cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2); + default: + cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2); + } + + val = read_gicreg(ICC_SRE_EL2); + write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); + isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ + write_gicreg(1, ICC_SRE_EL1); +} + +void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + u64 val; + u32 max_lr_idx, nr_pri_bits; + + /* + * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a + * Group0 interrupt (as generated in GICv2 mode) to be + * delivered as a FIQ to the guest, with potentially fatal + * consequences. So we must make sure that ICC_SRE_EL1 has + * been actually programmed with the value we want before + * starting to mess with the rest of the GIC. 
+ */ + write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); + isb(); + + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); + write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2); + + val = read_gicreg(ICH_VTR_EL2); + max_lr_idx = vtr_to_max_lr_idx(val); + nr_pri_bits = vtr_to_nr_pri_bits(val); + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2); + write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2); + } + + switch (nr_pri_bits) { + case 7: + write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); + write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); + case 6: + write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2); + default: + write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); + } + + switch (max_lr_idx) { + case 15: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); + case 14: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2); + case 13: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2); + case 12: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2); + case 11: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2); + case 10: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2); + case 9: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2); + case 8: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2); + case 7: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2); + case 6: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2); + case 5: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2); + case 4: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2); + case 3: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2); + case 2: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2); + case 1: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2); + case 0: + write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2); + } + + /* + * Ensures that the above will have reached the + * (re)distributors. This ensure the guest will read the + * correct values from the memory-mapped interface. + */ + isb(); + dsb(sy); + + /* + * Prevent the guest from touching the GIC system registers if + * SRE isn't enabled for GICv3 emulation. + */ + if (!cpu_if->vgic_sre) { + write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, + ICC_SRE_EL2); + } +} + +u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) +{ + return read_gicreg(ICH_VTR_EL2); +} From eccbe96e1fc0d89088bfac7ddf762f108e2ce94c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 13:58:00 +0000 Subject: [PATCH 36/78] arm64: KVM: Add compatibility aliases So far, we've implemented the new world switch with a completely different namespace, so that we could have both implementation compiled in. Let's take things one step further by adding weak aliases that have the same names as the original implementation. The weak attributes allows the new implementation to be overriden by the old one, and everything still work. At a later point, we'll be able to simply drop the old code, and everything will hopefully keep working, thanks to the aliases we have just added. This also saves us repainting all the callers. 
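The mechanism is plain GCC attribute plumbing: __alias() (which expands to __attribute__((alias(...)))) publishes the new function under the old symbol name, and __weak (__attribute__((weak))) lets the old, non-weak definition win at link time for as long as it exists. A self-contained sketch with hypothetical names:

	static int new_implementation(void)
	{
		return 0;
	}

	/*
	 * "legacy_name" now resolves to new_implementation(), unless a
	 * strong definition of legacy_name is linked in, in which case
	 * that one wins and this alias is ignored.
	 */
	__alias(new_implementation)
	int __weak legacy_name(void);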
Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall (cherry picked from commit 044ac37d1281fc7b59d5dce4fe979a99369e95f2) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/debug-sr.c | 3 +++ arch/arm64/kvm/hyp/hyp-entry.S | 3 +++ arch/arm64/kvm/hyp/switch.c | 3 +++ arch/arm64/kvm/hyp/tlb.c | 9 +++++++++ arch/arm64/kvm/hyp/vgic-v3-sr.c | 3 +++ 5 files changed, 21 insertions(+) diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 7848322deed6..d071f4591a6c 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -135,3 +135,6 @@ u32 __hyp_text __debug_read_mdcr_el2(void) { return read_sysreg(mdcr_el2); } + +__alias(__debug_read_mdcr_el2) +u32 __weak __kvm_get_mdcr_el2(void); diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 8e58a3ba6139..10d6d2a5390e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -189,6 +189,8 @@ ENDPROC(\label) .align 11 + .weak __kvm_hyp_vector +ENTRY(__kvm_hyp_vector) ENTRY(__hyp_vector) ventry el2t_sync_invalid // Synchronous EL2t ventry el2t_irq_invalid // IRQ EL2t @@ -210,3 +212,4 @@ ENTRY(__hyp_vector) ventry el1_fiq_invalid // FIQ 32-bit EL1 ventry el1_error_invalid // Error 32-bit EL1 ENDPROC(__hyp_vector) +ENDPROC(__kvm_hyp_vector) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b012870a92e7..7457ae4db86e 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -142,6 +142,9 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) return exit_code; } +__alias(__guest_run) +int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); + static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; void __hyp_text __noreturn __hyp_panic(void) diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 6fcb93a03659..5f815cf53a9a 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -48,6 +48,9 @@ void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) write_sysreg(0, vttbr_el2); } +__alias(__tlb_flush_vmid_ipa) +void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); + void __hyp_text __tlb_flush_vmid(struct kvm *kvm) { dsb(ishst); @@ -64,6 +67,9 @@ void __hyp_text __tlb_flush_vmid(struct kvm *kvm) write_sysreg(0, vttbr_el2); } +__alias(__tlb_flush_vmid) +void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); + void __hyp_text __tlb_flush_vm_context(void) { dsb(ishst); @@ -71,3 +77,6 @@ void __hyp_text __tlb_flush_vm_context(void) "ic ialluis ": : ); dsb(ish); } + +__alias(__tlb_flush_vm_context) +void __weak __kvm_flush_vm_context(void); diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 78d05f3cccfc..a76945874d5d 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -224,3 +224,6 @@ u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) { return read_gicreg(ICH_VTR_EL2); } + +__alias(__vgic_v3_read_ich_vtr_el2) +u64 __weak __vgic_v3_get_ich_vtr_el2(void); From 38ddb602f47ae9790eefb71630104e016649f3d7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 27 Oct 2015 12:18:48 +0000 Subject: [PATCH 37/78] arm64: KVM: Map the kernel RO section into HYP In order to run C code in HYP, we must make sure that the kernel's RO section is mapped into HYP (otherwise things break badly). 
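Concretely, the C world switch already consumes read-only data at EL2, such as the __hyp_panic_string format string introduced a few patches back; without a HYP mapping for .rodata, dereferencing it faults. The section bounds used below are the usual linker-script symbols (declared in asm-generic/sections.h):

	/* Linker-provided bounds of the kernel's read-only data. */
	extern char __start_rodata[], __end_rodata[];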
Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall (cherry picked from commit 910917bb7db070cc67557a6b3c8fcceaa5c398a7) Signed-off-by: Alex Shi --- arch/arm/kvm/arm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index d7bef2144760..449e312f1892 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -44,6 +44,7 @@ #include #include #include +#include #ifdef REQUIRES_VIRT __asm__(".arch_extension virt"); @@ -1065,6 +1066,12 @@ static int init_hyp_mode(void) goto out_free_mappings; } + err = create_hyp_mappings(__start_rodata, __end_rodata); + if (err) { + kvm_err("Cannot map rodata section\n"); + goto out_free_mappings; + } + /* * Map the Hyp stack pages */ From f540382473ed582cf94a962ab2ea500e6698d5ef Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 15:51:41 +0000 Subject: [PATCH 38/78] arm64: KVM: Move away from the assembly version of the world switch This is it. We remove all of the code that has now been rewritten. Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall (cherry picked from commit 1ea66d27e7b01086669ff2abdc3ac89dc90eae51) Signed-off-by: Alex Shi --- arch/arm64/kvm/Makefile | 2 - arch/arm64/kvm/hyp.S | 1081 +------------------------------ arch/arm64/kvm/vgic-v2-switch.S | 134 ---- arch/arm64/kvm/vgic-v3-switch.S | 269 -------- 4 files changed, 1 insertion(+), 1485 deletions(-) delete mode 100644 arch/arm64/kvm/vgic-v2-switch.S delete mode 100644 arch/arm64/kvm/vgic-v3-switch.S diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index d31e4e58e961..caee9ee8e12a 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -23,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 86c289832272..0ccdcbbef3c2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -17,910 +17,7 @@ #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) -#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) -#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x) -#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) - - .text - .pushsection .hyp.text, "ax" - .align PAGE_SHIFT - -.macro save_common_regs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_XREG_OFFSET(19) - stp x19, x20, [x3] - stp x21, x22, [x3, #16] - stp x23, x24, [x3, #32] - stp x25, x26, [x3, #48] - stp x27, x28, [x3, #64] - stp x29, lr, [x3, #80] - - mrs x19, sp_el0 - mrs x20, elr_el2 // pc before entering el2 - mrs x21, spsr_el2 // pstate before entering el2 - - stp x19, x20, [x3, #96] - str x21, [x3, #112] - - mrs x22, sp_el1 - mrs x23, elr_el1 - mrs x24, spsr_el1 - - str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] - str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] - str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] -.endm - -.macro restore_common_regs - // x2: base address for cpu context - // x3: tmp register - - ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] 
- ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] - ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] - - msr sp_el1, x22 - msr elr_el1, x23 - msr spsr_el1, x24 - - add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0 - ldp x19, x20, [x3] - ldr x21, [x3, #16] - - msr sp_el0, x19 - msr elr_el2, x20 // pc on return from el2 - msr spsr_el2, x21 // pstate on return from el2 - - add x3, x2, #CPU_XREG_OFFSET(19) - ldp x19, x20, [x3] - ldp x21, x22, [x3, #16] - ldp x23, x24, [x3, #32] - ldp x25, x26, [x3, #48] - ldp x27, x28, [x3, #64] - ldp x29, lr, [x3, #80] -.endm - -.macro save_host_regs - save_common_regs -.endm - -.macro restore_host_regs - restore_common_regs -.endm - -.macro save_fpsimd - // x2: cpu context address - // x3, x4: tmp regs - add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - fpsimd_save x3, 4 -.endm - -.macro restore_fpsimd - // x2: cpu context address - // x3, x4: tmp regs - add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - fpsimd_restore x3, 4 -.endm - -.macro save_guest_regs - // x0 is the vcpu address - // x1 is the return code, do not corrupt! - // x2 is the cpu context - // x3 is a tmp register - // Guest's x0-x3 are on the stack - - // Compute base to save registers - add x3, x2, #CPU_XREG_OFFSET(4) - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - stp x8, x9, [x3, #32] - stp x10, x11, [x3, #48] - stp x12, x13, [x3, #64] - stp x14, x15, [x3, #80] - stp x16, x17, [x3, #96] - str x18, [x3, #112] - - pop x6, x7 // x2, x3 - pop x4, x5 // x0, x1 - - add x3, x2, #CPU_XREG_OFFSET(0) - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - - save_common_regs -.endm - -.macro restore_guest_regs - // x0 is the vcpu address. - // x2 is the cpu context - // x3 is a tmp register - - // Prepare x0-x3 for later restore - add x3, x2, #CPU_XREG_OFFSET(0) - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - push x4, x5 // Push x0-x3 on the stack - push x6, x7 - - // x4-x18 - ldp x4, x5, [x3, #32] - ldp x6, x7, [x3, #48] - ldp x8, x9, [x3, #64] - ldp x10, x11, [x3, #80] - ldp x12, x13, [x3, #96] - ldp x14, x15, [x3, #112] - ldp x16, x17, [x3, #128] - ldr x18, [x3, #144] - - // x19-x29, lr, sp*, elr*, spsr* - restore_common_regs - - // Last bits of the 64bit state - pop x2, x3 - pop x0, x1 - - // Do not touch any register after this! -.endm - -/* - * Macros to perform system register save/restore. - * - * Ordering here is absolutely critical, and must be kept consistent - * in {save,restore}_sysregs, {save,restore}_guest_32bit_state, - * and in kvm_asm.h. - * - * In other words, don't touch any of these unless you know what - * you are doing. 
- */ -.macro save_sysregs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) - - mrs x4, vmpidr_el2 - mrs x5, csselr_el1 - mrs x6, sctlr_el1 - mrs x7, actlr_el1 - mrs x8, cpacr_el1 - mrs x9, ttbr0_el1 - mrs x10, ttbr1_el1 - mrs x11, tcr_el1 - mrs x12, esr_el1 - mrs x13, afsr0_el1 - mrs x14, afsr1_el1 - mrs x15, far_el1 - mrs x16, mair_el1 - mrs x17, vbar_el1 - mrs x18, contextidr_el1 - mrs x19, tpidr_el0 - mrs x20, tpidrro_el0 - mrs x21, tpidr_el1 - mrs x22, amair_el1 - mrs x23, cntkctl_el1 - mrs x24, par_el1 - mrs x25, mdscr_el1 - - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - stp x8, x9, [x3, #32] - stp x10, x11, [x3, #48] - stp x12, x13, [x3, #64] - stp x14, x15, [x3, #80] - stp x16, x17, [x3, #96] - stp x18, x19, [x3, #112] - stp x20, x21, [x3, #128] - stp x22, x23, [x3, #144] - stp x24, x25, [x3, #160] -.endm - -.macro save_debug type - // x4: pointer to register set - // x5: number of registers to skip - // x6..x22 trashed - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - mrs x21, \type\()15_el1 - mrs x20, \type\()14_el1 - mrs x19, \type\()13_el1 - mrs x18, \type\()12_el1 - mrs x17, \type\()11_el1 - mrs x16, \type\()10_el1 - mrs x15, \type\()9_el1 - mrs x14, \type\()8_el1 - mrs x13, \type\()7_el1 - mrs x12, \type\()6_el1 - mrs x11, \type\()5_el1 - mrs x10, \type\()4_el1 - mrs x9, \type\()3_el1 - mrs x8, \type\()2_el1 - mrs x7, \type\()1_el1 - mrs x6, \type\()0_el1 - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - str x21, [x4, #(15 * 8)] - str x20, [x4, #(14 * 8)] - str x19, [x4, #(13 * 8)] - str x18, [x4, #(12 * 8)] - str x17, [x4, #(11 * 8)] - str x16, [x4, #(10 * 8)] - str x15, [x4, #(9 * 8)] - str x14, [x4, #(8 * 8)] - str x13, [x4, #(7 * 8)] - str x12, [x4, #(6 * 8)] - str x11, [x4, #(5 * 8)] - str x10, [x4, #(4 * 8)] - str x9, [x4, #(3 * 8)] - str x8, [x4, #(2 * 8)] - str x7, [x4, #(1 * 8)] - str x6, [x4, #(0 * 8)] -.endm - -.macro restore_sysregs - // x2: base address for cpu context - // x3: tmp register - - add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) - - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - ldp x8, x9, [x3, #32] - ldp x10, x11, [x3, #48] - ldp x12, x13, [x3, #64] - ldp x14, x15, [x3, #80] - ldp x16, x17, [x3, #96] - ldp x18, x19, [x3, #112] - ldp x20, x21, [x3, #128] - ldp x22, x23, [x3, #144] - ldp x24, x25, [x3, #160] - - msr vmpidr_el2, x4 - msr csselr_el1, x5 - msr sctlr_el1, x6 - msr actlr_el1, x7 - msr cpacr_el1, x8 - msr ttbr0_el1, x9 - msr ttbr1_el1, x10 - msr tcr_el1, x11 - msr esr_el1, x12 - msr afsr0_el1, x13 - msr afsr1_el1, x14 - msr far_el1, x15 - msr mair_el1, x16 - msr vbar_el1, x17 - msr contextidr_el1, x18 - msr tpidr_el0, x19 - msr tpidrro_el0, x20 - msr tpidr_el1, x21 - msr amair_el1, x22 - msr cntkctl_el1, x23 - msr par_el1, x24 - msr mdscr_el1, x25 -.endm - -.macro restore_debug type - // x4: pointer to register set - // x5: number of registers to skip - // x6..x22 trashed - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - ldr x21, [x4, #(15 * 8)] - ldr x20, [x4, #(14 * 8)] - ldr x19, [x4, #(13 * 8)] - ldr x18, [x4, #(12 * 8)] - ldr x17, [x4, #(11 * 8)] - ldr x16, [x4, #(10 * 8)] - ldr x15, [x4, #(9 * 8)] - ldr x14, [x4, #(8 * 8)] - ldr x13, [x4, #(7 * 8)] - ldr x12, [x4, #(6 * 8)] - ldr x11, [x4, #(5 * 8)] - ldr x10, [x4, #(4 * 8)] - ldr x9, [x4, #(3 * 8)] - ldr x8, [x4, #(2 * 8)] - ldr x7, [x4, #(1 * 8)] - ldr x6, [x4, #(0 * 8)] - - adr x22, 1f - add x22, x22, x5, lsl #2 - br x22 -1: - msr \type\()15_el1, x21 - msr \type\()14_el1, x20 - msr \type\()13_el1, x19 - msr 
\type\()12_el1, x18 - msr \type\()11_el1, x17 - msr \type\()10_el1, x16 - msr \type\()9_el1, x15 - msr \type\()8_el1, x14 - msr \type\()7_el1, x13 - msr \type\()6_el1, x12 - msr \type\()5_el1, x11 - msr \type\()4_el1, x10 - msr \type\()3_el1, x9 - msr \type\()2_el1, x8 - msr \type\()1_el1, x7 - msr \type\()0_el1, x6 -.endm - -.macro skip_32bit_state tmp, target - // Skip 32bit state if not needed - mrs \tmp, hcr_el2 - tbnz \tmp, #HCR_RW_SHIFT, \target -.endm - -.macro skip_tee_state tmp, target - // Skip ThumbEE state if not needed - mrs \tmp, id_pfr0_el1 - tbz \tmp, #12, \target -.endm - -.macro skip_debug_state tmp, target - ldr \tmp, [x0, #VCPU_DEBUG_FLAGS] - tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target -.endm - -/* - * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled) - */ -.macro skip_fpsimd_state tmp, target - mrs \tmp, cptr_el2 - tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target -.endm - -.macro compute_debug_state target - // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY - // is set, we do a full save/restore cycle and disable trapping. - add x25, x0, #VCPU_CONTEXT - - // Check the state of MDSCR_EL1 - ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)] - and x26, x25, #DBG_MDSCR_KDE - and x25, x25, #DBG_MDSCR_MDE - adds xzr, x25, x26 - b.eq 9998f // Nothing to see there - - // If any interesting bits was set, we must set the flag - mov x26, #KVM_ARM64_DEBUG_DIRTY - str x26, [x0, #VCPU_DEBUG_FLAGS] - b 9999f // Don't skip restore - -9998: - // Otherwise load the flags from memory in case we recently - // trapped - skip_debug_state x25, \target -9999: -.endm - -.macro save_guest_32bit_state - skip_32bit_state x3, 1f - - add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) - mrs x4, spsr_abt - mrs x5, spsr_und - mrs x6, spsr_irq - mrs x7, spsr_fiq - stp x4, x5, [x3] - stp x6, x7, [x3, #16] - - add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) - mrs x4, dacr32_el2 - mrs x5, ifsr32_el2 - stp x4, x5, [x3] - - skip_fpsimd_state x8, 2f - mrs x6, fpexc32_el2 - str x6, [x3, #16] -2: - skip_debug_state x8, 1f - mrs x7, dbgvcr32_el2 - str x7, [x3, #24] -1: -.endm - -.macro restore_guest_32bit_state - skip_32bit_state x3, 1f - - add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) - ldp x4, x5, [x3] - ldp x6, x7, [x3, #16] - msr spsr_abt, x4 - msr spsr_und, x5 - msr spsr_irq, x6 - msr spsr_fiq, x7 - - add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) - ldp x4, x5, [x3] - msr dacr32_el2, x4 - msr ifsr32_el2, x5 - - skip_debug_state x8, 1f - ldr x7, [x3, #24] - msr dbgvcr32_el2, x7 -1: -.endm - -.macro activate_traps - ldr x2, [x0, #VCPU_HCR_EL2] - - /* - * We are about to set CPTR_EL2.TFP to trap all floating point - * register accesses to EL2, however, the ARM ARM clearly states that - * traps are only taken to EL2 if the operation would not otherwise - * trap to EL1. Therefore, always make sure that for 32-bit guests, - * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit. 
- */ - tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state - mov x3, #(1 << 30) - msr fpexc32_el2, x3 - isb -99: - msr hcr_el2, x2 - mov x2, #CPTR_EL2_TTA - orr x2, x2, #CPTR_EL2_TFP - msr cptr_el2, x2 - - mov x2, #(1 << 15) // Trap CP15 Cr=15 - msr hstr_el2, x2 - - // Monitor Debug Config - see kvm_arm_setup_debug() - ldr x2, [x0, #VCPU_MDCR_EL2] - msr mdcr_el2, x2 -.endm - -.macro deactivate_traps - mov x2, #HCR_RW - msr hcr_el2, x2 - msr hstr_el2, xzr - - mrs x2, mdcr_el2 - and x2, x2, #MDCR_EL2_HPMN_MASK - msr mdcr_el2, x2 -.endm - -.macro activate_vm - ldr x1, [x0, #VCPU_KVM] - kern_hyp_va x1 - ldr x2, [x1, #KVM_VTTBR] - msr vttbr_el2, x2 -.endm - -.macro deactivate_vm - msr vttbr_el2, xzr -.endm - -/* - * Call into the vgic backend for state saving - */ -.macro save_vgic_state -alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF - bl __save_vgic_v2_state -alternative_else - bl __save_vgic_v3_state -alternative_endif - mrs x24, hcr_el2 - mov x25, #HCR_INT_OVERRIDE - neg x25, x25 - and x24, x24, x25 - msr hcr_el2, x24 -.endm - -/* - * Call into the vgic backend for state restoring - */ -.macro restore_vgic_state - mrs x24, hcr_el2 - ldr x25, [x0, #VCPU_IRQ_LINES] - orr x24, x24, #HCR_INT_OVERRIDE - orr x24, x24, x25 - msr hcr_el2, x24 -alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF - bl __restore_vgic_v2_state -alternative_else - bl __restore_vgic_v3_state -alternative_endif -.endm - -.macro save_timer_state - // x0: vcpu pointer - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr w3, [x2, #KVM_TIMER_ENABLED] - cbz w3, 1f - - mrs x3, cntv_ctl_el0 - and x3, x3, #3 - str w3, [x0, #VCPU_TIMER_CNTV_CTL] - - isb - - mrs x3, cntv_cval_el0 - str x3, [x0, #VCPU_TIMER_CNTV_CVAL] - -1: - // Disable the virtual timer - msr cntv_ctl_el0, xzr - - // Allow physical timer/counter access for the host - mrs x2, cnthctl_el2 - orr x2, x2, #3 - msr cnthctl_el2, x2 - - // Clear cntvoff for the host - msr cntvoff_el2, xzr -.endm - -.macro restore_timer_state - // x0: vcpu pointer - // Disallow physical timer access for the guest - // Physical counter access is allowed - mrs x2, cnthctl_el2 - orr x2, x2, #1 - bic x2, x2, #2 - msr cnthctl_el2, x2 - - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr w3, [x2, #KVM_TIMER_ENABLED] - cbz w3, 1f - - ldr x3, [x2, #KVM_TIMER_CNTVOFF] - msr cntvoff_el2, x3 - ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL] - msr cntv_cval_el0, x2 - isb - - ldr w2, [x0, #VCPU_TIMER_CNTV_CTL] - and x2, x2, #3 - msr cntv_ctl_el0, x2 -1: -.endm - -__save_sysregs: - save_sysregs - ret - -__restore_sysregs: - restore_sysregs - ret - -/* Save debug state */ -__save_debug: - // x2: ptr to CPU context - // x3: ptr to debug reg struct - // x4/x5/x6-22/x24-26: trashed - - mrs x26, id_aa64dfr0_el1 - ubfx x24, x26, #12, #4 // Extract BRPs - ubfx x25, x26, #20, #4 // Extract WRPs - mov w26, #15 - sub w24, w26, w24 // How many BPs to skip - sub w25, w26, w25 // How many WPs to skip - - mov x5, x24 - add x4, x3, #DEBUG_BCR - save_debug dbgbcr - add x4, x3, #DEBUG_BVR - save_debug dbgbvr - - mov x5, x25 - add x4, x3, #DEBUG_WCR - save_debug dbgwcr - add x4, x3, #DEBUG_WVR - save_debug dbgwvr - - mrs x21, mdccint_el1 - str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] - ret - -/* Restore debug state */ -__restore_debug: - // x2: ptr to CPU context - // x3: ptr to debug reg struct - // x4/x5/x6-22/x24-26: trashed - - mrs x26, id_aa64dfr0_el1 - ubfx x24, x26, #12, #4 // Extract BRPs - ubfx x25, x26, #20, #4 // Extract WRPs - mov w26, #15 - sub w24, w26, w24 // How many BPs to skip - sub w25, w26, w25 // How many WPs to skip - 
- mov x5, x24 - add x4, x3, #DEBUG_BCR - restore_debug dbgbcr - add x4, x3, #DEBUG_BVR - restore_debug dbgbvr - - mov x5, x25 - add x4, x3, #DEBUG_WCR - restore_debug dbgwcr - add x4, x3, #DEBUG_WVR - restore_debug dbgwvr - - ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] - msr mdccint_el1, x21 - - ret - -__save_fpsimd: - skip_fpsimd_state x3, 1f - save_fpsimd -1: ret - -__restore_fpsimd: - skip_fpsimd_state x3, 1f - restore_fpsimd -1: ret - -switch_to_guest_fpsimd: - push x4, lr - - mrs x2, cptr_el2 - bic x2, x2, #CPTR_EL2_TFP - msr cptr_el2, x2 - isb - - mrs x0, tpidr_el2 - - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - bl __save_fpsimd - - add x2, x0, #VCPU_CONTEXT - bl __restore_fpsimd - - skip_32bit_state x3, 1f - ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)] - msr fpexc32_el2, x4 -1: - pop x4, lr - pop x2, x3 - pop x0, x1 - - eret - -/* - * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu); - * - * This is the world switch. The first half of the function - * deals with entering the guest, and anything from __kvm_vcpu_return - * to the end of the function deals with reentering the host. - * On the enter path, only x0 (vcpu pointer) must be preserved until - * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception - * code) must both be preserved until the epilogue. - * In both cases, x2 points to the CPU context we're saving/restoring from/to. - */ -ENTRY(__kvm_vcpu_run) - kern_hyp_va x0 - msr tpidr_el2, x0 // Save the vcpu register - - // Host context - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - save_host_regs - bl __save_sysregs - - compute_debug_state 1f - add x3, x0, #VCPU_HOST_DEBUG_STATE - bl __save_debug -1: - activate_traps - activate_vm - - restore_vgic_state - restore_timer_state - - // Guest context - add x2, x0, #VCPU_CONTEXT - - // We must restore the 32-bit state before the sysregs, thanks - // to Cortex-A57 erratum #852523. - restore_guest_32bit_state - bl __restore_sysregs - - skip_debug_state x3, 1f - ldr x3, [x0, #VCPU_DEBUG_PTR] - kern_hyp_va x3 - bl __restore_debug -1: - restore_guest_regs - - // That's it, no more messing around. - eret - -__kvm_vcpu_return: - // Assume x0 is the vcpu pointer, x1 the return code - // Guest's x0-x3 are on the stack - - // Guest context - add x2, x0, #VCPU_CONTEXT - - save_guest_regs - bl __save_fpsimd - bl __save_sysregs - - skip_debug_state x3, 1f - ldr x3, [x0, #VCPU_DEBUG_PTR] - kern_hyp_va x3 - bl __save_debug -1: - save_guest_32bit_state - - save_timer_state - save_vgic_state - - deactivate_traps - deactivate_vm - - // Host context - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - bl __restore_sysregs - bl __restore_fpsimd - /* Clear FPSIMD and Trace trapping */ - msr cptr_el2, xzr - - skip_debug_state x3, 1f - // Clear the dirty flag for the next run, as all the state has - // already been saved. Note that we nuke the whole 64bit word. - // If we ever add more flags, we'll have to be more careful... - str xzr, [x0, #VCPU_DEBUG_FLAGS] - add x3, x0, #VCPU_HOST_DEBUG_STATE - bl __restore_debug -1: - restore_host_regs - - mov x0, x1 - ret -END(__kvm_vcpu_run) - -// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); -ENTRY(__kvm_tlb_flush_vmid_ipa) - dsb ishst - - kern_hyp_va x0 - ldr x2, [x0, #KVM_VTTBR] - msr vttbr_el2, x2 - isb - - /* - * We could do so much better if we had the VA as well. - * Instead, we invalidate Stage-2 for this IPA, and the - * whole of Stage-1. Weep... 
- */ - lsr x1, x1, #12 - tlbi ipas2e1is, x1 - /* - * We have to ensure completion of the invalidation at Stage-2, - * since a table walk on another CPU could refill a TLB with a - * complete (S1 + S2) walk based on the old Stage-2 mapping if - * the Stage-1 invalidation happened first. - */ - dsb ish - tlbi vmalle1is - dsb ish - isb - - msr vttbr_el2, xzr - ret -ENDPROC(__kvm_tlb_flush_vmid_ipa) - -/** - * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs - * @struct kvm *kvm - pointer to kvm structure - * - * Invalidates all Stage 1 and 2 TLB entries for current VMID. - */ -ENTRY(__kvm_tlb_flush_vmid) - dsb ishst - - kern_hyp_va x0 - ldr x2, [x0, #KVM_VTTBR] - msr vttbr_el2, x2 - isb - - tlbi vmalls12e1is - dsb ish - isb - - msr vttbr_el2, xzr - ret -ENDPROC(__kvm_tlb_flush_vmid) - -ENTRY(__kvm_flush_vm_context) - dsb ishst - tlbi alle1is - ic ialluis - dsb ish - ret -ENDPROC(__kvm_flush_vm_context) - -__kvm_hyp_panic: - // Stash PAR_EL1 before corrupting it in __restore_sysregs - mrs x0, par_el1 - push x0, xzr - - // Guess the context by looking at VTTBR: - // If zero, then we're already a host. - // Otherwise restore a minimal host context before panicing. - mrs x0, vttbr_el2 - cbz x0, 1f - - mrs x0, tpidr_el2 - - deactivate_traps - deactivate_vm - - ldr x2, [x0, #VCPU_HOST_CONTEXT] - kern_hyp_va x2 - - bl __restore_sysregs - - /* - * Make sure we have a valid host stack, and don't leave junk in the - * frame pointer that will give us a misleading host stack unwinding. - */ - ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] - msr sp_el1, x22 - mov x29, xzr - -1: adr x0, __hyp_panic_str - adr x1, 2f - ldp x2, x3, [x1] - sub x0, x0, x2 - add x0, x0, x3 - mrs x1, spsr_el2 - mrs x2, elr_el2 - mrs x3, esr_el2 - mrs x4, far_el2 - mrs x5, hpfar_el2 - pop x6, xzr // active context PAR_EL1 - mrs x7, tpidr_el2 - - mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ - PSR_MODE_EL1h) - msr spsr_el2, lr - ldr lr, =panic - msr elr_el2, lr - eret - - .align 3 -2: .quad HYP_PAGE_OFFSET - .quad PAGE_OFFSET -ENDPROC(__kvm_hyp_panic) - -__hyp_panic_str: - .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0" - - .align 2 /* * u64 kvm_call_hyp(void *hypfn, ...); @@ -934,7 +31,7 @@ __hyp_panic_str: * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in x0. 
* * A function pointer with a value of 0 has a special meaning, and is * used to implement __hyp_get_vectors in the same way as in @@ -944,179 +41,3 @@ ENTRY(kvm_call_hyp) hvc #0 ret ENDPROC(kvm_call_hyp) - -.macro invalid_vector label, target - .align 2 -\label: - b \target -ENDPROC(\label) -.endm - - /* None of these should ever happen */ - invalid_vector el2t_sync_invalid, __kvm_hyp_panic - invalid_vector el2t_irq_invalid, __kvm_hyp_panic - invalid_vector el2t_fiq_invalid, __kvm_hyp_panic - invalid_vector el2t_error_invalid, __kvm_hyp_panic - invalid_vector el2h_sync_invalid, __kvm_hyp_panic - invalid_vector el2h_irq_invalid, __kvm_hyp_panic - invalid_vector el2h_fiq_invalid, __kvm_hyp_panic - invalid_vector el2h_error_invalid, __kvm_hyp_panic - invalid_vector el1_sync_invalid, __kvm_hyp_panic - invalid_vector el1_irq_invalid, __kvm_hyp_panic - invalid_vector el1_fiq_invalid, __kvm_hyp_panic - invalid_vector el1_error_invalid, __kvm_hyp_panic - -el1_sync: // Guest trapped into EL2 - push x0, x1 - push x2, x3 - - mrs x1, esr_el2 - lsr x2, x1, #ESR_ELx_EC_SHIFT - - cmp x2, #ESR_ELx_EC_HVC64 - b.ne el1_trap - - mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x3, el1_trap // called HVC - - /* Here, we're pretty sure the host called HVC. */ - pop x2, x3 - pop x0, x1 - - /* Check for __hyp_get_vectors */ - cbnz x0, 1f - mrs x0, vbar_el2 - b 2f - -1: push lr, xzr - - /* - * Compute the function address in EL2, and shuffle the parameters. - */ - kern_hyp_va x0 - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr - - pop lr, xzr -2: eret - -el1_trap: - /* - * x1: ESR - * x2: ESR_EC - */ - - /* Guest accessed VFP/SIMD registers, save host, restore Guest */ - cmp x2, #ESR_ELx_EC_FP_ASIMD - b.eq switch_to_guest_fpsimd - - cmp x2, #ESR_ELx_EC_DABT_LOW - mov x0, #ESR_ELx_EC_IABT_LOW - ccmp x2, x0, #4, ne - b.ne 1f // Not an abort we care about - - /* This is an abort. Check for permission fault */ -alternative_if_not ARM64_WORKAROUND_834220 - and x2, x1, #ESR_ELx_FSC_TYPE - cmp x2, #FSC_PERM - b.ne 1f // Not a permission fault -alternative_else - nop // Use the permission fault path to - nop // check for a valid S1 translation, - nop // regardless of the ESR value. -alternative_endif - - /* - * Check for Stage-1 page table walk, which is guaranteed - * to give a valid HPFAR_EL2. - */ - tbnz x1, #7, 1f // S1PTW is set - - /* Preserve PAR_EL1 */ - mrs x3, par_el1 - push x3, xzr - - /* - * Permission fault, HPFAR_EL2 is invalid. - * Resolve the IPA the hard way using the guest VA. - * Stage-1 translation already validated the memory access rights. - * As such, we can use the EL1 translation regime, and don't have - * to distinguish between EL0 and EL1 access. - */ - mrs x2, far_el2 - at s1e1r, x2 - isb - - /* Read result */ - mrs x3, par_el1 - pop x0, xzr // Restore PAR_EL1 from the stack - msr par_el1, x0 - tbnz x3, #0, 3f // Bail out if we failed the translation - ubfx x3, x3, #12, #36 // Extract IPA - lsl x3, x3, #4 // and present it like HPFAR - b 2f - -1: mrs x3, hpfar_el2 - mrs x2, far_el2 - -2: mrs x0, tpidr_el2 - str w1, [x0, #VCPU_ESR_EL2] - str x2, [x0, #VCPU_FAR_EL2] - str x3, [x0, #VCPU_HPFAR_EL2] - - mov x1, #ARM_EXCEPTION_TRAP - b __kvm_vcpu_return - - /* - * Translation failed. Just return to the guest and - * let it fault again. Another CPU is probably playing - * behind our back. 
- */ -3: pop x2, x3 - pop x0, x1 - - eret - -el1_irq: - push x0, x1 - push x2, x3 - mrs x0, tpidr_el2 - mov x1, #ARM_EXCEPTION_IRQ - b __kvm_vcpu_return - - .ltorg - - .align 11 - -ENTRY(__kvm_hyp_vector) - ventry el2t_sync_invalid // Synchronous EL2t - ventry el2t_irq_invalid // IRQ EL2t - ventry el2t_fiq_invalid // FIQ EL2t - ventry el2t_error_invalid // Error EL2t - - ventry el2h_sync_invalid // Synchronous EL2h - ventry el2h_irq_invalid // IRQ EL2h - ventry el2h_fiq_invalid // FIQ EL2h - ventry el2h_error_invalid // Error EL2h - - ventry el1_sync // Synchronous 64-bit EL1 - ventry el1_irq // IRQ 64-bit EL1 - ventry el1_fiq_invalid // FIQ 64-bit EL1 - ventry el1_error_invalid // Error 64-bit EL1 - - ventry el1_sync // Synchronous 32-bit EL1 - ventry el1_irq // IRQ 32-bit EL1 - ventry el1_fiq_invalid // FIQ 32-bit EL1 - ventry el1_error_invalid // Error 32-bit EL1 -ENDPROC(__kvm_hyp_vector) - - -ENTRY(__kvm_get_mdcr_el2) - mrs x0, mdcr_el2 - ret -ENDPROC(__kvm_get_mdcr_el2) - - .popsection diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S deleted file mode 100644 index 3f000712a85d..000000000000 --- a/arch/arm64/kvm/vgic-v2-switch.S +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - - .text - .pushsection .hyp.text, "ax" - -/* - * Save the VGIC CPU state into memory - * x0: Register pointing to VCPU struct - * Do not corrupt x1!!! 
- */ -ENTRY(__save_vgic_v2_state) -__save_vgic_v2_state: - /* Get VGIC VCTRL base into x2 */ - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr x2, [x2, #KVM_VGIC_VCTRL] - kern_hyp_va x2 - cbz x2, 2f // disabled - - /* Compute the address of struct vgic_cpu */ - add x3, x0, #VCPU_VGIC_CPU - - /* Save all interesting registers */ - ldr w5, [x2, #GICH_VMCR] - ldr w6, [x2, #GICH_MISR] - ldr w7, [x2, #GICH_EISR0] - ldr w8, [x2, #GICH_EISR1] - ldr w9, [x2, #GICH_ELRSR0] - ldr w10, [x2, #GICH_ELRSR1] - ldr w11, [x2, #GICH_APR] -CPU_BE( rev w5, w5 ) -CPU_BE( rev w6, w6 ) -CPU_BE( rev w7, w7 ) -CPU_BE( rev w8, w8 ) -CPU_BE( rev w9, w9 ) -CPU_BE( rev w10, w10 ) -CPU_BE( rev w11, w11 ) - - str w5, [x3, #VGIC_V2_CPU_VMCR] - str w6, [x3, #VGIC_V2_CPU_MISR] -CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] ) -CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] ) -CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] ) -CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] ) -CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] ) -CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] ) -CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] ) -CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] ) - str w11, [x3, #VGIC_V2_CPU_APR] - - /* Clear GICH_HCR */ - str wzr, [x2, #GICH_HCR] - - /* Save list registers */ - add x2, x2, #GICH_LR0 - ldr w4, [x3, #VGIC_CPU_NR_LR] - add x3, x3, #VGIC_V2_CPU_LR -1: ldr w5, [x2], #4 -CPU_BE( rev w5, w5 ) - str w5, [x3], #4 - sub w4, w4, #1 - cbnz w4, 1b -2: - ret -ENDPROC(__save_vgic_v2_state) - -/* - * Restore the VGIC CPU state from memory - * x0: Register pointing to VCPU struct - */ -ENTRY(__restore_vgic_v2_state) -__restore_vgic_v2_state: - /* Get VGIC VCTRL base into x2 */ - ldr x2, [x0, #VCPU_KVM] - kern_hyp_va x2 - ldr x2, [x2, #KVM_VGIC_VCTRL] - kern_hyp_va x2 - cbz x2, 2f // disabled - - /* Compute the address of struct vgic_cpu */ - add x3, x0, #VCPU_VGIC_CPU - - /* We only restore a minimal set of registers */ - ldr w4, [x3, #VGIC_V2_CPU_HCR] - ldr w5, [x3, #VGIC_V2_CPU_VMCR] - ldr w6, [x3, #VGIC_V2_CPU_APR] -CPU_BE( rev w4, w4 ) -CPU_BE( rev w5, w5 ) -CPU_BE( rev w6, w6 ) - - str w4, [x2, #GICH_HCR] - str w5, [x2, #GICH_VMCR] - str w6, [x2, #GICH_APR] - - /* Restore list registers */ - add x2, x2, #GICH_LR0 - ldr w4, [x3, #VGIC_CPU_NR_LR] - add x3, x3, #VGIC_V2_CPU_LR -1: ldr w5, [x3], #4 -CPU_BE( rev w5, w5 ) - str w5, [x2], #4 - sub w4, w4, #1 - cbnz w4, 1b -2: - ret -ENDPROC(__restore_vgic_v2_state) - - .popsection diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S deleted file mode 100644 index 3c20730ddff5..000000000000 --- a/arch/arm64/kvm/vgic-v3-switch.S +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -#include -#include -#include -#include -#include -#include - - .text - .pushsection .hyp.text, "ax" - -/* - * We store LRs in reverse order to let the CPU deal with streaming - * access. Use this macro to make it look saner... 
- */ -#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8) - -/* - * Save the VGIC CPU state into memory - * x0: Register pointing to VCPU struct - * Do not corrupt x1!!! - */ -.macro save_vgic_v3_state - // Compute the address of struct vgic_cpu - add x3, x0, #VCPU_VGIC_CPU - - // Make sure stores to the GIC via the memory mapped interface - // are now visible to the system register interface - dsb st - - // Save all interesting registers - mrs_s x5, ICH_VMCR_EL2 - mrs_s x6, ICH_MISR_EL2 - mrs_s x7, ICH_EISR_EL2 - mrs_s x8, ICH_ELSR_EL2 - - str w5, [x3, #VGIC_V3_CPU_VMCR] - str w6, [x3, #VGIC_V3_CPU_MISR] - str w7, [x3, #VGIC_V3_CPU_EISR] - str w8, [x3, #VGIC_V3_CPU_ELRSR] - - msr_s ICH_HCR_EL2, xzr - - mrs_s x21, ICH_VTR_EL2 - mvn w22, w21 - ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - mrs_s x20, ICH_LR15_EL2 - mrs_s x19, ICH_LR14_EL2 - mrs_s x18, ICH_LR13_EL2 - mrs_s x17, ICH_LR12_EL2 - mrs_s x16, ICH_LR11_EL2 - mrs_s x15, ICH_LR10_EL2 - mrs_s x14, ICH_LR9_EL2 - mrs_s x13, ICH_LR8_EL2 - mrs_s x12, ICH_LR7_EL2 - mrs_s x11, ICH_LR6_EL2 - mrs_s x10, ICH_LR5_EL2 - mrs_s x9, ICH_LR4_EL2 - mrs_s x8, ICH_LR3_EL2 - mrs_s x7, ICH_LR2_EL2 - mrs_s x6, ICH_LR1_EL2 - mrs_s x5, ICH_LR0_EL2 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - str x20, [x3, #LR_OFFSET(15)] - str x19, [x3, #LR_OFFSET(14)] - str x18, [x3, #LR_OFFSET(13)] - str x17, [x3, #LR_OFFSET(12)] - str x16, [x3, #LR_OFFSET(11)] - str x15, [x3, #LR_OFFSET(10)] - str x14, [x3, #LR_OFFSET(9)] - str x13, [x3, #LR_OFFSET(8)] - str x12, [x3, #LR_OFFSET(7)] - str x11, [x3, #LR_OFFSET(6)] - str x10, [x3, #LR_OFFSET(5)] - str x9, [x3, #LR_OFFSET(4)] - str x8, [x3, #LR_OFFSET(3)] - str x7, [x3, #LR_OFFSET(2)] - str x6, [x3, #LR_OFFSET(1)] - str x5, [x3, #LR_OFFSET(0)] - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - mrs_s x20, ICH_AP0R3_EL2 - str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] - mrs_s x19, ICH_AP0R2_EL2 - str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] -6: mrs_s x18, ICH_AP0R1_EL2 - str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] -5: mrs_s x17, ICH_AP0R0_EL2 - str w17, [x3, #VGIC_V3_CPU_AP0R] - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - mrs_s x20, ICH_AP1R3_EL2 - str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] - mrs_s x19, ICH_AP1R2_EL2 - str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] -6: mrs_s x18, ICH_AP1R1_EL2 - str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] -5: mrs_s x17, ICH_AP1R0_EL2 - str w17, [x3, #VGIC_V3_CPU_AP1R] - - // Restore SRE_EL1 access and re-enable SRE at EL1. 
- mrs_s x5, ICC_SRE_EL2 - orr x5, x5, #ICC_SRE_EL2_ENABLE - msr_s ICC_SRE_EL2, x5 - isb - mov x5, #1 - msr_s ICC_SRE_EL1, x5 -.endm - -/* - * Restore the VGIC CPU state from memory - * x0: Register pointing to VCPU struct - */ -.macro restore_vgic_v3_state - // Compute the address of struct vgic_cpu - add x3, x0, #VCPU_VGIC_CPU - - // Restore all interesting registers - ldr w4, [x3, #VGIC_V3_CPU_HCR] - ldr w5, [x3, #VGIC_V3_CPU_VMCR] - ldr w25, [x3, #VGIC_V3_CPU_SRE] - - msr_s ICC_SRE_EL1, x25 - - // make sure SRE is valid before writing the other registers - isb - - msr_s ICH_HCR_EL2, x4 - msr_s ICH_VMCR_EL2, x5 - - mrs_s x21, ICH_VTR_EL2 - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] - msr_s ICH_AP1R3_EL2, x20 - ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] - msr_s ICH_AP1R2_EL2, x19 -6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] - msr_s ICH_AP1R1_EL2, x18 -5: ldr w17, [x3, #VGIC_V3_CPU_AP1R] - msr_s ICH_AP1R0_EL2, x17 - - tbnz w21, #29, 6f // 6 bits - tbz w21, #30, 5f // 5 bits - // 7 bits - ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] - msr_s ICH_AP0R3_EL2, x20 - ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] - msr_s ICH_AP0R2_EL2, x19 -6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] - msr_s ICH_AP0R1_EL2, x18 -5: ldr w17, [x3, #VGIC_V3_CPU_AP0R] - msr_s ICH_AP0R0_EL2, x17 - - and w22, w21, #0xf - mvn w22, w21 - ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - ldr x20, [x3, #LR_OFFSET(15)] - ldr x19, [x3, #LR_OFFSET(14)] - ldr x18, [x3, #LR_OFFSET(13)] - ldr x17, [x3, #LR_OFFSET(12)] - ldr x16, [x3, #LR_OFFSET(11)] - ldr x15, [x3, #LR_OFFSET(10)] - ldr x14, [x3, #LR_OFFSET(9)] - ldr x13, [x3, #LR_OFFSET(8)] - ldr x12, [x3, #LR_OFFSET(7)] - ldr x11, [x3, #LR_OFFSET(6)] - ldr x10, [x3, #LR_OFFSET(5)] - ldr x9, [x3, #LR_OFFSET(4)] - ldr x8, [x3, #LR_OFFSET(3)] - ldr x7, [x3, #LR_OFFSET(2)] - ldr x6, [x3, #LR_OFFSET(1)] - ldr x5, [x3, #LR_OFFSET(0)] - - adr x24, 1f - add x24, x24, x23 - br x24 - -1: - msr_s ICH_LR15_EL2, x20 - msr_s ICH_LR14_EL2, x19 - msr_s ICH_LR13_EL2, x18 - msr_s ICH_LR12_EL2, x17 - msr_s ICH_LR11_EL2, x16 - msr_s ICH_LR10_EL2, x15 - msr_s ICH_LR9_EL2, x14 - msr_s ICH_LR8_EL2, x13 - msr_s ICH_LR7_EL2, x12 - msr_s ICH_LR6_EL2, x11 - msr_s ICH_LR5_EL2, x10 - msr_s ICH_LR4_EL2, x9 - msr_s ICH_LR3_EL2, x8 - msr_s ICH_LR2_EL2, x7 - msr_s ICH_LR1_EL2, x6 - msr_s ICH_LR0_EL2, x5 - - // Ensure that the above will have reached the - // (re)distributors. This ensure the guest will read - // the correct values from the memory-mapped interface. - isb - dsb sy - - // Prevent the guest from touching the GIC system registers - // if SRE isn't enabled for GICv3 emulation - cbnz x25, 1f - mrs_s x5, ICC_SRE_EL2 - and x5, x5, #~ICC_SRE_EL2_ENABLE - msr_s ICC_SRE_EL2, x5 -1: -.endm - -ENTRY(__save_vgic_v3_state) - save_vgic_v3_state - ret -ENDPROC(__save_vgic_v3_state) - -ENTRY(__restore_vgic_v3_state) - restore_vgic_v3_state - ret -ENDPROC(__restore_vgic_v3_state) - -ENTRY(__vgic_v3_get_ich_vtr_el2) - mrs_s x0, ICH_VTR_EL2 - ret -ENDPROC(__vgic_v3_get_ich_vtr_el2) - - .popsection From baf8b41c950e6536f750146ae27c0dbd307ebc94 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 9 Jun 2014 19:47:09 +0100 Subject: [PATCH 39/78] arm/arm64: Add new is_kernel_in_hyp_mode predicate With ARMv8.1 VHE extension, it will be possible to run the kernel at EL2 (aka HYP mode). 
In order for the kernel to easily find out where it is running, add a new predicate that returns whether or not the kernel is in HYP mode. For completeness, the 32bit code also gets such a predicate (always returning false) so that code common to both architectures (timers, KVM) can use it transparently. Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier (cherry picked from commit 82deae0fc8ba256c1061dd4de42f0ef6cb9f954f) Signed-off-by: Alex Shi Conflicts: arch/arm/include/asm/virt.h --- arch/arm/include/asm/virt.h | 9 +++++++++ arch/arm64/include/asm/virt.h | 10 ++++++++++ 2 files changed, 19 insertions(+)
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 4371f45c5784..d4ceaf5f299b 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void) { return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } + +static inline bool is_kernel_in_hyp_mode(void) +{ + return false; +} + +/* The section containing the hypervisor text */ +extern char __hyp_text_start[]; +extern char __hyp_text_end[]; #endif #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 7a5df5252dd7..9f22dd607958 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -23,6 +23,8 @@ #ifndef __ASSEMBLY__ +#include + /* * __boot_cpu_mode records what mode CPUs were booted in. * A correctly-implemented bootloader must start all CPUs in the same mode: @@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void) return __boot_cpu_mode[0] != __boot_cpu_mode[1]; } +static inline bool is_kernel_in_hyp_mode(void) +{ + u64 el; + + asm("mrs %0, CurrentEL" : "=r" (el)); + return el == CurrentEL_EL2; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[];
From 45f5d7f2ea9f736fde9d94fe3e3268add0ba2b15 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 11:24:05 +0000 Subject: [PATCH 40/78] arm64: Add ARM64_HAS_VIRT_HOST_EXTN feature
Add a new ARM64_HAS_VIRT_HOST_EXTN feature to indicate that the CPU has the ARMv8.1 VHE capability. This will be used to trigger kernel patching in KVM.
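As an illustration, capability users could later branch on this feature at runtime, along these lines (a minimal sketch assuming the existing cpus_have_cap() helper; the two setup callees are hypothetical placeholders):

    #include <asm/cpufeature.h>

    static void __init setup_hyp_mode(void)
    {
    	/* Take the VHE path only when the capability was detected */
    	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
    		setup_vhe_mode();	/* hypothetical helper */
    	else
    		setup_classic_hyp();	/* hypothetical helper */
    }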
Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier (cherry picked from commit d88701bea3664cea99b8b7380f63a3bd0ec3ead3) Signed-off-by: Alex Shi Conflicts: arch/arm64/include/asm/cpufeature.h --- arch/arm64/include/asm/cpufeature.h | 7 ++++++- arch/arm64/kernel/cpufeature.c | 11 +++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 8136afc9df0d..f1a134900ae5 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -32,7 +32,12 @@ #define ARM64_WORKAROUND_834220 7 #define ARM64_WORKAROUND_CAVIUM_27456 8 -#define ARM64_NCAPS 9 +/* #define ARM64_HAS_NO_HW_PREFETCH 8 */ +/* #define ARM64_HAS_UAO 9 */ +/* #define ARM64_ALT_PAN_NOT_UAO 10 */ +#define ARM64_HAS_VIRT_HOST_EXTN 11 + +#define ARM64_NCAPS 12 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0669c63281ea..ab68e7c9808d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -26,6 +26,7 @@ #include #include #include +#include unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); @@ -621,6 +622,11 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry) return has_sre; } +static bool runs_at_el2(const struct arm64_cpu_capabilities *entry) +{ + return is_kernel_in_hyp_mode(); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -651,6 +657,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = 2, }, #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ + { + .desc = "Virtualization Host Extensions", + .capability = ARM64_HAS_VIRT_HOST_EXTN, + .matches = runs_at_el2, + }, {}, }; From 493f9d879875347d62bc1436402ce92bb5b5f699 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 13:52:12 +0000 Subject: [PATCH 41/78] arm64: KVM: VHE: Patch out use of HVC With VHE, the host never issues an HVC instruction to get into the KVM code, as we can simply branch there. Use runtime code patching to simplify things a bit. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit b81125c791a2958cc60ae968fc1cdea82b7cd3b0) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp.S | 7 ++++++ arch/arm64/kvm/hyp/hyp-entry.S | 40 ++++++++++++++++++++++++++-------- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 0ccdcbbef3c2..0689a74e6ba0 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -17,7 +17,9 @@ #include +#include #include +#include /* * u64 kvm_call_hyp(void *hypfn, ...); @@ -38,6 +40,11 @@ * arch/arm64/kernel/hyp_stub.S. */ ENTRY(kvm_call_hyp) +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN hvc #0 ret +alternative_else + b __vhe_hyp_call + nop +alternative_endif ENDPROC(kvm_call_hyp) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 10d6d2a5390e..8950cdb1174b 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -38,6 +38,34 @@ ldp x0, x1, [sp], #16 .endm +.macro do_el2_call + /* + * Shuffle the parameters before calling the function + * pointed to in x0. Assumes parameters in x[1,2,3]. 
+ */ + sub sp, sp, #16 + str lr, [sp] + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + ldr lr, [sp] + add sp, sp, #16 +.endm + +ENTRY(__vhe_hyp_call) + do_el2_call + /* + * We used to rely on having an exception return to get + * an implicit isb. In the E2H case, we don't have it anymore. + * rather than changing all the leaf functions, just do it here + * before returning to the rest of the kernel. + */ + isb + ret +ENDPROC(__vhe_hyp_call) + el1_sync: // Guest trapped into EL2 save_x0_to_x3 @@ -58,19 +86,13 @@ el1_sync: // Guest trapped into EL2 mrs x0, vbar_el2 b 2f -1: stp lr, xzr, [sp, #-16]! - +1: /* - * Compute the function address in EL2, and shuffle the parameters. + * Perform the EL2 call */ kern_hyp_va x0 - mov lr, x0 - mov x0, x1 - mov x1, x2 - mov x2, x3 - blr lr + do_el2_call - ldp lr, xzr, [sp], #16 2: eret el1_trap:
From f65bf332f0f43691cf7c3916b15f218bcb0e3e79 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 19:57:11 +0000 Subject: [PATCH 42/78] arm64: KVM: Turn system register numbers to an enum
Having the system register numbers as #defines has been a pain since day one, as the ordering is pretty fragile, and moving things around leads to renumbering and epic conflict resolutions. Now that we're mostly accessing the sysreg file in C, an enum is a much better type to use, and we can clean things up a bit. Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall (cherry picked from commit 9d8415d6c148a16b6d906a96f0596851d7e4d607) Signed-off-by: Alex Shi --- arch/arm64/include/asm/kvm_asm.h | 76 -------------------------- arch/arm64/include/asm/kvm_emulate.h | 1 - arch/arm64/include/asm/kvm_host.h | 81 +++++++++++++++++++++++++++- arch/arm64/include/asm/kvm_mmio.h | 1 - arch/arm64/kernel/asm-offsets.c | 1 + arch/arm64/kvm/guest.c | 1 - arch/arm64/kvm/handle_exit.c | 1 + arch/arm64/kvm/hyp/debug-sr.c | 1 + arch/arm64/kvm/hyp/entry.S | 3 +- arch/arm64/kvm/hyp/sysreg-sr.c | 1 + arch/arm64/kvm/sys_regs.c | 1 + virt/kvm/arm/vgic-v3.c | 1 + 12 files changed, 87 insertions(+), 82 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 5e377101f919..52b777b7d407 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -20,82 +20,6 @@ #include -/* - * 0 is reserved as an invalid value. - * Order *must* be kept in sync with the hyp switch code.
- */ -#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */ -#define CSSELR_EL1 2 /* Cache Size Selection Register */ -#define SCTLR_EL1 3 /* System Control Register */ -#define ACTLR_EL1 4 /* Auxiliary Control Register */ -#define CPACR_EL1 5 /* Coprocessor Access Control */ -#define TTBR0_EL1 6 /* Translation Table Base Register 0 */ -#define TTBR1_EL1 7 /* Translation Table Base Register 1 */ -#define TCR_EL1 8 /* Translation Control Register */ -#define ESR_EL1 9 /* Exception Syndrome Register */ -#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */ -#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */ -#define FAR_EL1 12 /* Fault Address Register */ -#define MAIR_EL1 13 /* Memory Attribute Indirection Register */ -#define VBAR_EL1 14 /* Vector Base Address Register */ -#define CONTEXTIDR_EL1 15 /* Context ID Register */ -#define TPIDR_EL0 16 /* Thread ID, User R/W */ -#define TPIDRRO_EL0 17 /* Thread ID, User R/O */ -#define TPIDR_EL1 18 /* Thread ID, Privileged */ -#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ -#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ -#define PAR_EL1 21 /* Physical Address Register */ -#define MDSCR_EL1 22 /* Monitor Debug System Control Register */ -#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */ - -/* 32bit specific registers. Keep them at the end of the range */ -#define DACR32_EL2 24 /* Domain Access Control Register */ -#define IFSR32_EL2 25 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ -#define NR_SYS_REGS 28 - -/* 32bit mapping */ -#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ -#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ -#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ -#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ -#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ -#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ -#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ -#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ -#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ -#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. 
*/ -#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ -#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ -#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ -#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ -#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ -#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ -#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ -#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ -#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ -#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ -#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ -#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ -#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ -#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ -#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ -#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ -#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ -#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ -#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ - -#define cp14_DBGDSCRext (MDSCR_EL1 * 2) -#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2) -#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2) -#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1) -#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) -#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) -#define cp14_DBGDCCINT (MDCCINT_EL1 * 2) - -#define NR_COPRO_REGS (NR_SYS_REGS * 2) - #define ARM_EXCEPTION_IRQ 0 #define ARM_EXCEPTION_TRAP 1 diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 25a40213bd9b..3066328cd86b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -26,7 +26,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a35ce7266aac..975db147b43e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -85,6 +84,86 @@ struct kvm_vcpu_fault_info { u64 hpfar_el2; /* Hyp IPA Fault Address Register */ }; +/* + * 0 is reserved as an invalid value. + * Order should be kept in sync with the save/restore code. 
+ */ +enum vcpu_sysreg { + __INVALID_SYSREG__, + MPIDR_EL1, /* MultiProcessor Affinity Register */ + CSSELR_EL1, /* Cache Size Selection Register */ + SCTLR_EL1, /* System Control Register */ + ACTLR_EL1, /* Auxiliary Control Register */ + CPACR_EL1, /* Coprocessor Access Control */ + TTBR0_EL1, /* Translation Table Base Register 0 */ + TTBR1_EL1, /* Translation Table Base Register 1 */ + TCR_EL1, /* Translation Control Register */ + ESR_EL1, /* Exception Syndrome Register */ + AFSR0_EL1, /* Auxilary Fault Status Register 0 */ + AFSR1_EL1, /* Auxilary Fault Status Register 1 */ + FAR_EL1, /* Fault Address Register */ + MAIR_EL1, /* Memory Attribute Indirection Register */ + VBAR_EL1, /* Vector Base Address Register */ + CONTEXTIDR_EL1, /* Context ID Register */ + TPIDR_EL0, /* Thread ID, User R/W */ + TPIDRRO_EL0, /* Thread ID, User R/O */ + TPIDR_EL1, /* Thread ID, Privileged */ + AMAIR_EL1, /* Aux Memory Attribute Indirection Register */ + CNTKCTL_EL1, /* Timer Control Register (EL1) */ + PAR_EL1, /* Physical Address Register */ + MDSCR_EL1, /* Monitor Debug System Control Register */ + MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ + + /* 32bit specific registers. Keep them at the end of the range */ + DACR32_EL2, /* Domain Access Control Register */ + IFSR32_EL2, /* Instruction Fault Status Register */ + FPEXC32_EL2, /* Floating-Point Exception Control Register */ + DBGVCR32_EL2, /* Debug Vector Catch Register */ + + NR_SYS_REGS /* Nothing after this line! */ +}; + +/* 32bit mapping */ +#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ +#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ +#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ +#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ +#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ +#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ +#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ +#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ +#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ +#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. 
*/ +#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ +#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ +#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ +#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ +#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ +#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ +#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ +#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ +#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ +#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ +#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ +#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ +#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ +#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ +#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ +#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ +#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ + +#define cp14_DBGDSCRext (MDSCR_EL1 * 2) +#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2) +#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2) +#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1) +#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) +#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) +#define cp14_DBGDCCINT (MDCCINT_EL1 * 2) + +#define NR_COPRO_REGS (NR_SYS_REGS * 2) + struct kvm_cpu_context { struct kvm_regs gp_regs; union { diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h index 889c908ee631..fe612a962576 100644 --- a/arch/arm64/include/asm/kvm_mmio.h +++ b/arch/arm64/include/asm/kvm_mmio.h @@ -19,7 +19,6 @@ #define __ARM64_KVM_MMIO_H__ #include -#include #include /* diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 2a948bfcaf87..35e6bc67e376 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -125,6 +125,7 @@ int main(void) DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); + DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 3039f080e2d5..e5ee8880d5d9 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 15f0477b0d2a..198cf10b262d 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index d071f4591a6c..567a0d6aa1df 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -18,6 +18,7 @@ #include #include +#include #include #include "hyp.h" diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 1050b2b09904..fd0fbe9b7e6a 100644 --- a/arch/arm64/kvm/hyp/entry.S 
+++ b/arch/arm64/kvm/hyp/entry.S @@ -27,7 +27,6 @@ #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) -#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) .text .pushsection .hyp.text, "ax" @@ -150,7 +149,7 @@ ENTRY(__fpsimd_guest_restore) // Skip restoring fpexc32 for AArch64 guests mrs x1, hcr_el2 tbnz x1, #HCR_RW_SHIFT, 1f - ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)] + ldr x4, [x3, #VCPU_FPEXC32_EL2] msr fpexc32_el2, x4 1: ldp x4, lr, [sp], #16 diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 36035417ec52..425630980229 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -18,6 +18,7 @@ #include #include +#include #include #include "hyp.h" diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d2650e84faf2..88adebfab0bd 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 3813d23ebb80..453eafd4dd6e 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -28,6 +28,7 @@ #include #include +#include #include /* These are for GICv2 emulation only */ From 45adca2b9e519cee6db63fae5f5460a2abd997fd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 25 Oct 2015 20:03:08 +0000 Subject: [PATCH 43/78] arm64: KVM: Cleanup asm-offset.c As we've now rewritten most of our code-base in C, most of the KVM-specific code in asm-offset.c is useless. Delete-time again! Signed-off-by: Marc Zyngier Acked-by: Christoffer Dall (cherry picked from commit 23a13465c84c51ec4330863b59e9d50ee671f8b4) Signed-off-by: Alex Shi --- arch/arm64/kernel/asm-offsets.c | 39 --------------------------------- 1 file changed, 39 deletions(-) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 35e6bc67e376..66ce4fdd4644 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -121,50 +121,11 @@ int main(void) DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); - DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1)); - DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); - DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); - DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); - DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags)); - DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr)); - DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr)); - DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr)); - DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr)); - DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr)); - DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); - DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); - DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, 
arch.host_debug_state)); - DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); - DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); - DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); - DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); - DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); - DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); - DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); - DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); - DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); - DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); - DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); - DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre)); - DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr)); - DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr)); - DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr)); - DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr)); - DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr)); - DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r)); - DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r)); - DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr)); - DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); - DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
From aa9b1a6affdc4da75d95d40cf83167351b9848b4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 26 Oct 2015 09:10:07 +0000 Subject: [PATCH 44/78] arm64: KVM: Remove weak attributes
As we've now switched to the new world switch implementation, remove the weak attributes, as nobody is supposed to override them anymore.
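The __alias() construct that replaces the weak symbols below boils down to GCC's alias attribute: a private (static) implementation gets one public name, with no weak symbol left to override. A stand-alone sketch of the pattern (names are illustrative only):

    /* The kernel's __alias(f) wraps __attribute__((alias(#f))). */
    static unsigned int impl_read_reg(void)
    {
    	return 42;	/* stand-in for the real register read */
    }

    /* Public symbol resolving to the same code, no weak override. */
    unsigned int public_read_reg(void) __attribute__((alias("impl_read_reg")));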
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit 3ffa75cd18134a03f86f9d9b8b6e9128e0eda254) Signed-off-by: Alex Shi --- arch/arm64/kvm/hyp/debug-sr.c | 5 ++--- arch/arm64/kvm/hyp/hyp-entry.S | 3 --- arch/arm64/kvm/hyp/switch.c | 5 ++--- arch/arm64/kvm/hyp/tlb.c | 16 +++++++--------- arch/arm64/kvm/hyp/vgic-v3-sr.c | 5 ++--- 5 files changed, 13 insertions(+), 21 deletions(-) diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 567a0d6aa1df..c9c1e97501a9 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -132,10 +132,9 @@ void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu) vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; } -u32 __hyp_text __debug_read_mdcr_el2(void) +static u32 __hyp_text __debug_read_mdcr_el2(void) { return read_sysreg(mdcr_el2); } -__alias(__debug_read_mdcr_el2) -u32 __weak __kvm_get_mdcr_el2(void); +__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void); diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 8950cdb1174b..1bdeee70833e 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -211,9 +211,7 @@ ENDPROC(\label) .align 11 - .weak __kvm_hyp_vector ENTRY(__kvm_hyp_vector) -ENTRY(__hyp_vector) ventry el2t_sync_invalid // Synchronous EL2t ventry el2t_irq_invalid // IRQ EL2t ventry el2t_fiq_invalid // FIQ EL2t @@ -233,5 +231,4 @@ ENTRY(__hyp_vector) ventry el1_irq // IRQ 32-bit EL1 ventry el1_fiq_invalid // FIQ 32-bit EL1 ventry el1_error_invalid // Error 32-bit EL1 -ENDPROC(__hyp_vector) ENDPROC(__kvm_hyp_vector) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 7457ae4db86e..ca8f5a5e2f96 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -85,7 +85,7 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) __vgic_call_restore_state()(vcpu); } -int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; @@ -142,8 +142,7 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu) return exit_code; } -__alias(__guest_run) -int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); +__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 5f815cf53a9a..2a7e0d838698 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -17,7 +17,7 @@ #include "hyp.h" -void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) +static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { dsb(ishst); @@ -48,10 +48,10 @@ void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) write_sysreg(0, vttbr_el2); } -__alias(__tlb_flush_vmid_ipa) -void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); +__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, + phys_addr_t ipa); -void __hyp_text __tlb_flush_vmid(struct kvm *kvm) +static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) { dsb(ishst); @@ -67,10 +67,9 @@ void __hyp_text __tlb_flush_vmid(struct kvm *kvm) write_sysreg(0, vttbr_el2); } -__alias(__tlb_flush_vmid) -void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); +__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); -void 
__hyp_text __tlb_flush_vm_context(void) +static void __hyp_text __tlb_flush_vm_context(void) { dsb(ishst); asm volatile("tlbi alle1is \n" @@ -78,5 +77,4 @@ void __hyp_text __tlb_flush_vm_context(void) dsb(ish); } -__alias(__tlb_flush_vm_context) -void __weak __kvm_flush_vm_context(void); +__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index a76945874d5d..9142e082f5f3 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -220,10 +220,9 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) } } -u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) +static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void) { return read_gicreg(ICH_VTR_EL2); } -__alias(__vgic_v3_read_ich_vtr_el2) -u64 __weak __vgic_v3_get_ich_vtr_el2(void); +__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void); From bbe9945cbb95b179bdee5420e4b22804d560eb16 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 14 Dec 2015 17:58:33 +0000 Subject: [PATCH 45/78] ARM: KVM: Cleanup exception injection David Binderman reported that the exception injection code had a couple of unused variables lingering around. Upon examination, it looked like this code could do with an anticipated spring cleaning, which amounts to deduplicating the CPSR/SPSR update, and making it look a bit more like the architecture spec. The spurious variables are removed in the process. Reported-by: David Binderman Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit e078ef81514222ffc10bf1767c15df16ca0b84db) Signed-off-by: Alex Shi --- arch/arm/kvm/emulate.c | 74 ++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index d6c005283678..dc99159857b4 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) return vbar; } +/* + * Switch to an exception mode, updating both CPSR and SPSR. Follow + * the logic described in AArch32.EnterMode() from the ARMv8 ARM. + */ +static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) +{ + unsigned long cpsr = *vcpu_cpsr(vcpu); + u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; + + *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; + + switch (mode) { + case FIQ_MODE: + *vcpu_cpsr(vcpu) |= PSR_F_BIT; + /* Fall through */ + case ABT_MODE: + case IRQ_MODE: + *vcpu_cpsr(vcpu) |= PSR_A_BIT; + /* Fall through */ + default: + *vcpu_cpsr(vcpu) |= PSR_I_BIT; + } + + *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); + + if (sctlr & SCTLR_TE) + *vcpu_cpsr(vcpu) |= PSR_T_BIT; + if (sctlr & SCTLR_EE) + *vcpu_cpsr(vcpu) |= PSR_E_BIT; + + /* Note: These now point to the mode banked copies */ + *vcpu_spsr(vcpu) = cpsr; +} + /** * kvm_inject_undefined - inject an undefined exception into the guest * @vcpu: The VCPU to receive the undefined exception @@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) */ void kvm_inject_undefined(struct kvm_vcpu *vcpu) { - unsigned long new_lr_value; - unsigned long new_spsr_value; unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; bool is_thumb = (cpsr & PSR_T_BIT); u32 vect_offset = 4; u32 return_offset = (is_thumb) ? 
2 : 4; - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) - return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to UND banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; + kvm_update_psr(vcpu, UND_MODE); + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset; /* Branch to exception vector */ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; @@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) */ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) { - unsigned long new_lr_value; - unsigned long new_spsr_value; unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; bool is_thumb = (cpsr & PSR_T_BIT); u32 vect_offset; u32 return_offset = (is_thumb) ? 4 : 0; bool is_lpae; - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) + return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to ABT banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; + kvm_update_psr(vcpu, ABT_MODE); + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; if (is_pabt) vect_offset = 12;
From 3893b12b089ffb06908112d2852711d9b489148c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 16 Dec 2015 15:41:12 +0000 Subject: [PATCH 46/78] arm64: KVM: debug: Remove spurious inline attributes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
The debug trapping code is pretty heavy on the "inline" attribute, but most functions are actually referenced in the sysreg tables, making the inlining impossible. Removing the useless inline qualifier seems the right thing to do, having verified that the output code is similar. Cc: Alex Bennée Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit 281243cbe075d27ab884858d6e0b15d8ed61bc25) Signed-off-by: Alex Shi --- arch/arm64/kvm/sys_regs.c | 58 +++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 88adebfab0bd..eec3598b4184 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -220,9 +220,9 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the * hyp.S code switches between host and guest values in future.
*/ -static inline void reg_to_dbg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - u64 *dbg_reg) +static void reg_to_dbg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + u64 *dbg_reg) { u64 val = p->regval; @@ -235,18 +235,18 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } -static inline void dbg_to_reg(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - u64 *dbg_reg) +static void dbg_to_reg(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + u64 *dbg_reg) { p->regval = *dbg_reg; if (p->is_32bit) p->regval &= 0xffffffffUL; } -static inline bool trap_bvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_bvr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; @@ -280,15 +280,15 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_bvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_bvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; } -static inline bool trap_bcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_bcr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; @@ -323,15 +323,15 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_bcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_bcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; } -static inline bool trap_wvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_wvr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; @@ -366,15 +366,15 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_wvr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_wvr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; } -static inline bool trap_wcr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_wcr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; @@ -408,8 +408,8 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, return 0; } -static inline void reset_wcr(struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static void reset_wcr(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; } @@ -723,9 +723,9 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, * system is in. 
*/ -static inline bool trap_xvr(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *rd) +static bool trap_xvr(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
From ab054025ab77a09a751083bb70e7e2a242e2242e Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 16 Nov 2015 11:28:16 +0000 Subject: [PATCH 47/78] arm/arm64: KVM: Remove unreferenced S2_PGD_ORDER
Since commit a987370 ("arm64: KVM: Fix stage-2 PGD allocation to have per-page refcounting") there is no reference to S2_PGD_ORDER, so kill it for good. Acked-by: Christoffer Dall Signed-off-by: Vladimir Murzin Signed-off-by: Marc Zyngier (cherry picked from commit 9d4dc688342a3cbda43a1789cd2c6c888658c60d) Signed-off-by: Alex Shi --- arch/arm/include/asm/kvm_arm.h | 1 - arch/arm/kvm/mmu.c | 6 +++--- arch/arm64/include/asm/kvm_mmu.h | 1 - 3 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index dc641ddf0784..b05bb5ae3659 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -135,7 +135,6 @@ #define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT) #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) /* Virtualization Translation Control Register (VTCR) bits */ #define VTCR_SH0 (3 << 12)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 11b6595c2672..e2b6801f54e4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -656,9 +656,9 @@ static void *kvm_alloc_hwpgd(void) * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. * - * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can - * support either full 40-bit input addresses or limited to 32-bit input - * addresses). Clears the allocated pages. + * Allocates only the stage-2 HW PGD level table(s) (can support either full + * 40-bit input addresses or limited to 32-bit input addresses). Clears the + * allocated pages. * * Note we don't need locking here as this is only called when the VM is * created, which can only be done once.
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 61505676d085..54cba803e2d9 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -158,7 +158,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT) #endif #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
From 516f3f777e5fb0710f1626c79e3dacca751b8c30 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 16 Nov 2015 11:28:17 +0000 Subject: [PATCH 48/78] arm: KVM: Make kvm_arm.h friendly to assembly code
kvm_arm.h is included from both C code and assembly code; however, some definitions in this header are supplied with U/UL/ULL suffixes, which can confuse the assembler once they are evaluated. We have the _AC() macro for such cases, so just wrap the problem places with it.
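For reference, _AC() (from include/uapi/linux/const.h) works roughly like this: it pastes the suffix onto the constant for C, and drops it entirely for assembly:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)	X		/* assembler sees a bare constant */
    #else
    #define __AC(X,Y)	(X##Y)
    #define _AC(X,Y)	__AC(X,Y)	/* C sees (1UL), (0xffULL), ... */
    #endif

so _AC(1, UL) << 25 expands to (1UL) << 25 in C, but to 1 << 25 when the header is pulled into assembly, where the UL suffix would not parse.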
Acked-by: Christoffer Dall Signed-off-by: Vladimir Murzin Signed-off-by: Marc Zyngier (cherry picked from commit 8420dcd37ef34040c8fc5a27bf66887b3b2faf80) Signed-off-by: Alex Shi --- arch/arm/include/asm/kvm_arm.h | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index b05bb5ae3659..01d4d7abe34e 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -19,6 +19,7 @@ #ifndef __ARM_KVM_ARM_H__ #define __ARM_KVM_ARM_H__ +#include #include /* Hyp Configuration Register (HCR) bits */ @@ -132,9 +133,9 @@ * space. */ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) -#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) +#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30)) /* Virtualization Translation Control Register (VTCR) bits */ #define VTCR_SH0 (3 << 12) @@ -161,17 +162,17 @@ #define VTTBR_X (5 - KVM_T0SZ) #endif #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT _AC(48, ULL) +#define VTTBR_VMID_MASK (_AC(0xff, ULL) << VTTBR_VMID_SHIFT) /* Hyp Syndrome Register (HSR) bits */ #define HSR_EC_SHIFT (26) -#define HSR_EC (0x3fU << HSR_EC_SHIFT) -#define HSR_IL (1U << 25) +#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT) +#define HSR_IL (_AC(1, UL) << 25) #define HSR_ISS (HSR_IL - 1) #define HSR_ISV_SHIFT (24) -#define HSR_ISV (1U << HSR_ISV_SHIFT) +#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT) #define HSR_SRT_SHIFT (16) #define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT) #define HSR_FSC (0x3f) @@ -179,9 +180,9 @@ #define HSR_SSE (1 << 21) #define HSR_WNR (1 << 6) #define HSR_CV_SHIFT (24) -#define HSR_CV (1U << HSR_CV_SHIFT) +#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT) #define HSR_COND_SHIFT (20) -#define HSR_COND (0xfU << HSR_COND_SHIFT) +#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT) #define FSC_FAULT (0x04) #define FSC_ACCESS (0x08) @@ -209,13 +210,13 @@ #define HSR_EC_DABT (0x24) #define HSR_EC_DABT_HYP (0x25) -#define HSR_WFI_IS_WFE (1U << 0) +#define HSR_WFI_IS_WFE (_AC(1, UL) << 0) -#define HSR_HVC_IMM_MASK ((1UL << 16) - 1) +#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1) -#define HSR_DABT_S1PTW (1U << 7) -#define HSR_DABT_CM (1U << 8) -#define HSR_DABT_EA (1U << 9) +#define HSR_DABT_S1PTW (_AC(1, UL) << 7) +#define HSR_DABT_CM (_AC(1, UL) << 8) +#define HSR_DABT_EA (_AC(1, UL) << 9) #define kvm_arm_exception_type \ {0, "RESET" }, \
From 1a1549bd76b83aef16938f575af8f5a1ecf3ed1f Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Mon, 16 Nov 2015 11:28:18 +0000 Subject: [PATCH 49/78] arm64: KVM: Add support for 16-bit VMID
The ARMv8.1 architecture extension allows a choice between 8-bit and 16-bit VMIDs, so use this capability for KVM.
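A quick worked example of the reworked mask, using the new VTTBR_VMID_MASK(size) definition from the diff below:

    VTTBR_VMID_MASK(8)  == ((1 <<  8) - 1) << 48 == 0x00ff000000000000
    VTTBR_VMID_MASK(16) == ((1 << 16) - 1) << 48 == 0xffff000000000000

The VMID field still starts at bit 48; on an ARMv8.1 host it simply widens from bits 55:48 to bits 63:48.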
Reviewed-by: Christoffer Dall Signed-off-by: Vladimir Murzin Signed-off-by: Marc Zyngier (cherry picked from commit 20475f784d29991b3b843c80c38a36f2ebb35ac4) Signed-off-by: Alex Shi Conflicts: compatible with LTS commit 849e28efb04c4c ("arm64: KVM: Configure TCR_EL2.PS at runtime") in arch/arm64/kvm/hyp-init.S --- arch/arm/include/asm/kvm_arm.h | 2 +- arch/arm/include/asm/kvm_mmu.h | 5 +++++ arch/arm/kvm/arm.c | 10 ++++++++-- arch/arm64/include/asm/kvm_arm.h | 3 ++- arch/arm64/include/asm/kvm_mmu.h | 8 ++++++++ arch/arm64/kvm/hyp-init.S | 8 ++++++++ 6 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 01d4d7abe34e..e22089fb44dc 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -164,7 +164,7 @@ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) #define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) #define VTTBR_VMID_SHIFT _AC(48, ULL) -#define VTTBR_VMID_MASK (_AC(0xff, ULL) << VTTBR_VMID_SHIFT) +#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) /* Hyp Syndrome Register (HSR) bits */ #define HSR_EC_SHIFT (26)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 405aa1883307..9203c21b4673 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, pgd_t *merged_hyp_pgd, unsigned long hyp_idmap_start) { } +static inline unsigned int kvm_get_vmid_bits(void) +{ + return 8; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 449e312f1892..0178376a6585 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -59,7 +59,8 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); /* The VMID used in the VTTBR */ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -static u8 kvm_next_vmid; +static u32 kvm_next_vmid; +static unsigned int kvm_vmid_bits __read_mostly; static DEFINE_SPINLOCK(kvm_vmid_lock); static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) @@ -432,11 +433,12 @@ static void update_vttbr(struct kvm *kvm) kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); kvm->arch.vmid = kvm_next_vmid; kvm_next_vmid++; + kvm_next_vmid &= (1 << kvm_vmid_bits) - 1; /* update vttbr to be used with the new vmid */ pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); - vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); kvm->arch.vttbr = pgd_phys | vmid; spin_unlock(&kvm_vmid_lock); @@ -1132,6 +1134,10 @@ static int init_hyp_mode(void) kvm_perf_init(); + /* set size of VMID supported by CPU */ + kvm_vmid_bits = kvm_get_vmid_bits(); + kvm_info("%d-bit VMID\n", kvm_vmid_bits); + kvm_info("Hyp mode initialized successfully\n"); return 0;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 2d960f8588b0..3776db01a8bb 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -123,6 +123,7 @@ #define VTCR_EL2_SL0_LVL1 (1 << 6) #define VTCR_EL2_T0SZ_MASK 0x3f #define VTCR_EL2_T0SZ_40B 24 +#define VTCR_EL2_VS 19 /* * We configure the Stage-2 page tables to always restrict the IPA space to be @@ -167,7 +168,7 @@ #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) #define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) #define VTTBR_VMID_SHIFT (UL(48)) -#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) +#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) /* Hyp System Trap Register */ #define HSTR_EL2_T(x) (1 << x)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 54cba803e2d9..0bf8b4320a91 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -20,6 +20,7 @@ #include #include +#include /* * As we only have the TTBR0_EL2 register, we cannot express @@ -301,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd, merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE); } +static inline unsigned int kvm_get_vmid_bits(void) +{ + int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1); + + return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 84c338f017b2..d073b5a216f7 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -96,6 +96,14 @@ __do_hyp_init: ldr x4, =VTCR_EL2_FLAGS bfi x4, x5, #16, #3 + /* + * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in + * VTCR_EL2. + */ + mrs x5, ID_AA64MMFR1_EL1 + ubfx x5, x5, #5, #1 + lsl x5, x5, #VTCR_EL2_VS + orr x4, x4, x5 msr vtcr_el2, x4
From 6a852456e1259a8986e8c0b45a984b904ee9466e Mon Sep 17 00:00:00 2001 From: Pavel Fedin Date: Fri, 18 Dec 2015 14:38:43 +0300 Subject: [PATCH 50/78] arm/arm64: KVM: Detect vGIC presence at runtime
Before commit 662d9715840aef44dcb573b0f9fab9e8319c868a ("arm/arm64: KVM: Kill CONFIG_KVM_ARM_{VGIC,TIMER}") it was possible to compile the kernel without vGIC and vTimer support. Its commit message mentions the possibility of detecting vGIC support at runtime, but this was never implemented. This patch introduces a runtime check, restoring the lost functionality. It again allows KVM to be used on hardware without a vGIC; the interrupt controller has to be emulated in userspace in this case. A -ENODEV return code from the probe function means there's no GIC at all. -ENXIO happens when, for example, there is a GIC node in the device tree, but it does not specify vGIC resources. Any other error code is still treated as a full stop, because it might mean some really serious problem. Signed-off-by: Pavel Fedin Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit c7da6fa43cb1c5e649da0f478a491feb9208cae7) Signed-off-by: Alex Shi --- arch/arm/kvm/arm.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 0178376a6585..b936475489b5 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -63,6 +63,8 @@ static u32 kvm_next_vmid; static unsigned int kvm_vmid_bits __read_mostly; static DEFINE_SPINLOCK(kvm_vmid_lock); +static bool vgic_present; + static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) { BUG_ON(preemptible()); @@ -134,7 +136,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.vmid_gen = 0; /* The maximum number of VCPUs is limited by the host's GIC model */ - kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus(); + kvm->arch.max_vcpus = vgic_present ?
+ kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS; return ret; out_free_stage2_pgd: @@ -172,6 +175,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int r; switch (ext) { case KVM_CAP_IRQCHIP: + r = vgic_present; + break; case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: @@ -914,6 +919,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, switch (dev_id) { case KVM_ARM_DEVICE_VGIC_V2: + if (!vgic_present) + return -ENXIO; return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); default: return -ENODEV; @@ -928,6 +935,8 @@ long kvm_arch_vm_ioctl(struct file *filp, switch (ioctl) { case KVM_CREATE_IRQCHIP: { + if (!vgic_present) + return -ENXIO; return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); } case KVM_ARM_SET_DEVICE_ADDR: { @@ -1118,8 +1127,17 @@ static int init_hyp_mode(void) * Init HYP view of VGIC */ err = kvm_vgic_hyp_init(); - if (err) + switch (err) { + case 0: + vgic_present = true; + break; + case -ENODEV: + case -ENXIO: + vgic_present = false; + break; + default: goto out_free_context; + } /* * Init HYP architected timer support From 9502888106b61278b043b20032ee45de159384df Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 17:54:35 +0000 Subject: [PATCH 51/78] arm/arm64: KVM: Add hook for C-based stage2 init As we're about to move the stage2 init to C code, introduce some C hooks that will later be populated with arch-specific implementations. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier (cherry picked from commit 35a2491a624af1fa7ab6990639f5246cd5f12592) Signed-off-by: Alex Shi --- arch/arm/include/asm/kvm_host.h | 4 ++++ arch/arm/kvm/arm.c | 1 + arch/arm64/include/asm/kvm_host.h | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 6692982c9b57..945bfa5e7752 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -214,6 +214,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index b936475489b5..30c9f7b42505 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -982,6 +982,7 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + __cpu_init_stage2(); kvm_arm_init_debug(); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 975db147b43e..1b37b5d5092f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -326,6 +326,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} From a7ba39df2de41823e8b0dcda1ab183e71feb988a Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Wed, 27 Apr 2016 17:47:00 +0100 Subject: [PATCH 52/78] arm64: Fold proc-macros.S into assembler.h To allow the assembler macros defined in arch/arm64/mm/proc-macros.S to be used outside the mm code move the contents of proc-macros.S to asm/assembler.h. Also, delete proc-macros.S, and fix up all references to proc-macros.S. 
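As a rough C rendering of what the relocated dcache_line_size macro computes (assuming the architectural CTR_EL0 layout, where DminLine in bits 19:16 holds log2 of the line size in 4-byte words):

    static inline unsigned long dcache_line_size_bytes(void)
    {
    	unsigned long ctr;

    	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
    	return 4UL << ((ctr >> 16) & 0xf);	/* words to bytes */
    }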
Signed-off-by: Geoff Levand Acked-by: Pavel Machek [rebased, included dcache_by_line_op] Signed-off-by: James Morse Acked-by: Catalin Marinas Signed-off-by: Will Deacon (cherry picked from commit 7b7293ae3dbd0a1965bf310b77fed5f9bb37bb93) Signed-off-by: Alex Shi Conflicts: remove obsolete arch/arm64/mm/proc-macros.S --- arch/arm64/include/asm/assembler.h | 82 ++++++++++++++++++++++++- arch/arm64/mm/cache.S | 2 - arch/arm64/mm/proc-macros.S | 97 ------------------------------ arch/arm64/mm/proc.S | 3 - 4 files changed, 81 insertions(+), 103 deletions(-) delete mode 100644 arch/arm64/mm/proc-macros.S diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 12eff928ef8b..55fab0a827d9 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -1,5 +1,5 @@ /* - * Based on arch/arm/include/asm/assembler.h + * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S * * Copyright (C) 1996-2000 Russell King * Copyright (C) 2012 ARM Ltd. @@ -23,6 +23,8 @@ #ifndef __ASM_ASSEMBLER_H #define __ASM_ASSEMBLER_H +#include +#include #include #include @@ -193,6 +195,84 @@ lr .req x30 // link register str \src, [\tmp, :lo12:\sym] .endm +/* + * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) + */ + .macro vma_vm_mm, rd, rn + ldr \rd, [\rn, #VMA_VM_MM] + .endm + +/* + * mmid - get context id from mm pointer (mm->context.id) + */ + .macro mmid, rd, rn + ldr \rd, [\rn, #MM_CONTEXT_ID] + .endm + +/* + * dcache_line_size - get the minimum D-cache line size from the CTR register. + */ + .macro dcache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + ubfm \tmp, \tmp, #16, #19 // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * icache_line_size - get the minimum I-cache line size from the CTR register. + */ + .macro icache_line_size, reg, tmp + mrs \tmp, ctr_el0 // read CTR + and \tmp, \tmp, #0xf // cache line size encoding + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm + +/* + * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map + */ + .macro tcr_set_idmap_t0sz, valreg, tmpreg +#ifndef CONFIG_ARM64_VA_BITS_48 + ldr_l \tmpreg, idmap_t0sz + bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH +#endif + .endm + +/* + * Macro to perform a data cache maintenance for the interval + * [kaddr, kaddr + size) + * + * op: operation passed to dc instruction + * domain: domain used in dsb instruciton + * kaddr: starting virtual address of the region + * size: size of the region + * Corrupts: kaddr, size, tmp1, tmp2 + */ + .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 + dcache_line_size \tmp1, \tmp2 + add \size, \kaddr, \size + sub \tmp2, \tmp1, #1 + bic \kaddr, \kaddr, \tmp2 +9998: dc \op, \kaddr + add \kaddr, \kaddr, \tmp1 + cmp \kaddr, \size + b.lo 9998b + dsb \domain + .endm + +/* + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present + */ + .macro reset_pmuserenr_el0, tmpreg + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer + sbfx \tmpreg, \tmpreg, #8, #4 + cmp \tmpreg, #1 // Skip if no PMU present + b.lt 9000f + msr pmuserenr_el0, xzr // Disable PMU access from EL0 +9000: + .endm + /* * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 6df07069a025..50ff9ba3a236 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -24,8 +24,6 @@ #include #include -#include "proc-macros.S" - /* * flush_icache_range(start,end) * diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S deleted file mode 100644 index 2bce36415c0c..000000000000 --- a/arch/arm64/mm/proc-macros.S +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Based on arch/arm/mm/proc-macros.S - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -/* - * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm) - */ - .macro vma_vm_mm, rd, rn - ldr \rd, [\rn, #VMA_VM_MM] - .endm - -/* - * mmid - get context id from mm pointer (mm->context.id) - */ - .macro mmid, rd, rn - ldr \rd, [\rn, #MM_CONTEXT_ID] - .endm - -/* - * dcache_line_size - get the minimum D-cache line size from the CTR register. - */ - .macro dcache_line_size, reg, tmp - mrs \tmp, ctr_el0 // read CTR - ubfm \tmp, \tmp, #16, #19 // cache line size encoding - mov \reg, #4 // bytes per word - lsl \reg, \reg, \tmp // actual cache line size - .endm - -/* - * icache_line_size - get the minimum I-cache line size from the CTR register. 
- */
-	.macro	icache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	and	\tmp, \tmp, #0xf		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
-
-/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
- */
-	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
-#ifndef CONFIG_ARM64_VA_BITS_48
-	ldr_l	\tmpreg, idmap_t0sz
-	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
-#endif
-	.endm
-
-/*
- * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
- */
-	.macro	reset_pmuserenr_el0, tmpreg
-	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
-	sbfx	\tmpreg, \tmpreg, #8, #4
-	cmp	\tmpreg, #1			// Skip if no PMU present
-	b.lt	9000f
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
-9000:
-	.endm
-/*
- * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
- *
- * 	op:		operation passed to dc instruction
- * 	domain:		domain used in dsb instruciton
- * 	kaddr:		starting virtual address of the region
- * 	size:		size of the region
- * 	Corrupts:	kaddr, size, tmp1, tmp2
- */
-	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
-	dcache_line_size \tmp1, \tmp2
-	add	\size, \kaddr, \size
-	sub	\tmp2, \tmp1, #1
-	bic	\kaddr, \kaddr, \tmp2
-9998:	dc	\op, \kaddr
-	add	\kaddr, \kaddr, \tmp1
-	cmp	\kaddr, \size
-	b.lo	9998b
-	dsb	\domain
-	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a92738e8b1eb..b6a141fd0386 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -23,13 +23,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include

-#include "proc-macros.S"
-
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
 #elif defined(CONFIG_ARM64_16K_PAGES)

From 46f269e394b70c332389818cb378a84f27026f24 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Wed, 27 Apr 2016 17:47:01 +0100
Subject: [PATCH 53/78] arm64: Cleanup SCTLR flags

We currently have macros defining flags for the arm64 sctlr registers in
both kvm_arm.h and sysreg.h. To clean things up and simplify, move the
definitions of the SCTLR_EL2 flags from kvm_arm.h to sysreg.h, rename any
SCTLR_EL1 or SCTLR_EL2 flags that are common to both registers to be
SCTLR_ELx, with 'x' indicating a common flag, and fix up all files to
include the proper header or to use the new macro names.
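[Note: the rename is value-preserving. A minimal sketch of the check one
could compile, using the flag values this patch introduces; the 0x100f
constant is computed here for illustration, not taken from the patch:]

	#define SCTLR_ELx_M	(1 << 0)
	#define SCTLR_ELx_A	(1 << 1)
	#define SCTLR_ELx_C	(1 << 2)
	#define SCTLR_ELx_SA	(1 << 3)
	#define SCTLR_ELx_I	(1 << 12)

	#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
				 SCTLR_ELx_SA | SCTLR_ELx_I)

	/* must compose to the same mask the old SCTLR_EL2_FLAGS had */
	_Static_assert(SCTLR_ELx_FLAGS == 0x100f,
		       "common SCTLR_ELx flags changed value");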
Signed-off-by: Geoff Levand [Restored pgtable-hwdef.h include] Signed-off-by: James Morse Acked-by: Marc Zyngier Signed-off-by: Will Deacon (cherry picked from commit e7227d0e528f9a96d4a866f43e20dd9b33f0e782) Signed-off-by: Alex Shi --- arch/arm64/include/asm/kvm_arm.h | 11 ----------- arch/arm64/include/asm/sysreg.h | 19 +++++++++++++++---- arch/arm64/kvm/hyp-init.S | 5 +++-- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 3776db01a8bb..8b709f53f874 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -83,17 +83,6 @@ #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) -/* Hyp System Control Register (SCTLR_EL2) bits */ -#define SCTLR_EL2_EE (1 << 25) -#define SCTLR_EL2_WXN (1 << 19) -#define SCTLR_EL2_I (1 << 12) -#define SCTLR_EL2_SA (1 << 3) -#define SCTLR_EL2_C (1 << 2) -#define SCTLR_EL2_A (1 << 1) -#define SCTLR_EL2_M 1 -#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \ - SCTLR_EL2_SA | SCTLR_EL2_I) - /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1 << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4aeebec3d882..99dac2f9f21a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -82,10 +82,21 @@ #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\ (!!x)<<8 | 0x1f) -/* SCTLR_EL1 */ -#define SCTLR_EL1_CP15BEN (0x1 << 5) -#define SCTLR_EL1_SED (0x1 << 8) -#define SCTLR_EL1_SPAN (0x1 << 23) +/* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_EE (1 << 25) +#define SCTLR_ELx_I (1 << 12) +#define SCTLR_ELx_SA (1 << 3) +#define SCTLR_ELx_C (1 << 2) +#define SCTLR_ELx_A (1 << 1) +#define SCTLR_ELx_M 1 + +#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ + SCTLR_ELx_SA | SCTLR_ELx_I) + +/* SCTLR_EL1 specific flags. */ +#define SCTLR_EL1_SPAN (1 << 23) +#define SCTLR_EL1_SED (1 << 8) +#define SCTLR_EL1_CP15BEN (1 << 5) /* id_aa64isar0 */ diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index d073b5a216f7..034d152c3fbe 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -21,6 +21,7 @@ #include #include #include +#include .text .pushsection .hyp.idmap.text, "ax" @@ -116,8 +117,8 @@ __do_hyp_init: dsb sy mrs x4, sctlr_el2 - and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 - ldr x5, =SCTLR_EL2_FLAGS + and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2 + ldr x5, =SCTLR_ELx_FLAGS orr x4, x4, x5 msr sctlr_el2, x4 isb From be5d6aa0cc805bb6f7bc936042620f5826322009 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 27 Apr 2016 17:47:06 +0100 Subject: [PATCH 54/78] arm64: kernel: Rework finisher callback out of __cpu_suspend_enter() Hibernate could make use of the cpu_suspend() code to save/restore cpu state, however it needs to be able to return '0' from the 'finisher'. Rework cpu_suspend() so that the finisher is called from C code, independently from the save/restore of cpu state. Space to save the context in is allocated in the caller's stack frame, and passed into __cpu_suspend_enter(). Hibernate's use of this API will look like a copy of the cpu_suspend() function. 
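[Note: in outline, the calling convention this patch moves to looks like
the sketch below. It mirrors the suspend.c hunk later in this patch;
hibernate's copy of the loop can drop the -EOPNOTSUPP forcing so that
its finisher may legitimately return 0:]

	struct sleep_stack_data state;	/* lives in the caller's stack frame */
	int ret = 0;

	if (__cpu_suspend_enter(&state)) {
		/* suspend path: the finisher now runs from C */
		ret = fn(arg);
		if (!ret)	/* a cpu_suspend() finisher must not return */
			ret = -EOPNOTSUPP;
	} else {
		/* resume path: we came back through cpu_resume() */
		__cpu_suspend_exit();
	}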
Signed-off-by: James Morse
Acked-by: Lorenzo Pieralisi
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
(cherry picked from commit adc9b2dfd00924e9e9b98613f36a6cb8c51f0dc6)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/suspend.h | 20 +++++++
 arch/arm64/kernel/asm-offsets.c | 2 +
 arch/arm64/kernel/sleep.S | 93 ++++++++++++--------------------
 arch/arm64/kernel/suspend.c | 72 ++++++++++++++-----------
 4 files changed, 97 insertions(+), 90 deletions(-)

diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 59a5b0f1e81c..365d8cdd0073 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -2,6 +2,7 @@
 #define __ASM_SUSPEND_H

 #define NR_CTX_REGS 11
+#define NR_CALLEE_SAVED_REGS 12

 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -21,6 +22,25 @@ struct sleep_save_sp {
 	phys_addr_t save_ptr_stash_phys;
 };

+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+	struct cpu_suspend_ctx system_regs;
+	unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
+};
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
 #endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 66ce4fdd4644..2abd57fd4255 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -135,6 +135,8 @@ int main(void)
 	DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
 	DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
 	DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+	DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
+	DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
 #endif
 	DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
 	DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f586f7c875e2..c1c28c0c2a7d 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,37 +49,30 @@
 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
* - * x0 = suspend finisher argument - * x1 = suspend finisher function pointer + * cpu_resume() will restore this saved state, and return. Because the + * link-register is saved and restored, it will appear to return from this + * function. So that the caller can tell the suspend/resume paths apart, + * __cpu_suspend_enter() will always return a non-zero value, whereas the + * path through cpu_resume() will return 0. + * + * x0 = struct sleep_stack_data area */ ENTRY(__cpu_suspend_enter) - stp x29, lr, [sp, #-96]! - stp x19, x20, [sp,#16] - stp x21, x22, [sp,#32] - stp x23, x24, [sp,#48] - stp x25, x26, [sp,#64] - stp x27, x28, [sp,#80] - /* - * Stash suspend finisher and its argument in x20 and x19 - */ - mov x19, x0 - mov x20, x1 + stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS] + stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16] + stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32] + stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48] + stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64] + stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80] + + /* save the sp in cpu_suspend_ctx */ mov x2, sp - sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx - mov x0, sp - /* - * x0 now points to struct cpu_suspend_ctx allocated on the stack - */ - str x2, [x0, #CPU_CTX_SP] + str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP] + + /* find the mpidr_hash */ ldr x1, =sleep_save_sp ldr x1, [x1, #SLEEP_SAVE_SP_VIRT] mrs x7, mpidr_el1 @@ -93,34 +86,11 @@ ENTRY(__cpu_suspend_enter) ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)] compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10 add x1, x1, x8, lsl #3 + + stp x29, lr, [sp, #-16]! bl __cpu_suspend_save - /* - * Grab suspend finisher in x20 and its argument in x19 - */ - mov x0, x19 - mov x1, x20 - /* - * We are ready for power down, fire off the suspend finisher - * in x1, with argument in x0 - */ - blr x1 - /* - * Never gets here, unless suspend finisher fails. 
- * Successful cpu_suspend should return from cpu_resume, returning - * through this code path is considered an error - * If the return value is set to 0 force x0 = -EOPNOTSUPP - * to make sure a proper error condition is propagated - */ - cmp x0, #0 - mov x3, #-EOPNOTSUPP - csel x0, x3, x0, eq - add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer - ldp x19, x20, [sp, #16] - ldp x21, x22, [sp, #32] - ldp x23, x24, [sp, #48] - ldp x25, x26, [sp, #64] - ldp x27, x28, [sp, #80] - ldp x29, lr, [sp], #96 + ldp x29, lr, [sp], #16 + mov x0, #1 ret ENDPROC(__cpu_suspend_enter) .ltorg @@ -146,12 +116,6 @@ ENDPROC(cpu_resume_mmu) .popsection cpu_resume_after_mmu: mov x0, #0 // return zero on success - ldp x19, x20, [sp, #16] - ldp x21, x22, [sp, #32] - ldp x23, x24, [sp, #48] - ldp x25, x26, [sp, #64] - ldp x27, x28, [sp, #80] - ldp x29, lr, [sp], #96 ret ENDPROC(cpu_resume_after_mmu) @@ -168,6 +132,8 @@ ENTRY(cpu_resume) /* x7 contains hash index, let's use it to grab context pointer */ ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS ldr x0, [x0, x7, lsl #3] + add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS + add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] /* load physical address of identity map page table in x1 */ @@ -178,5 +144,12 @@ ENTRY(cpu_resume) * pointer and x1 to contain physical address of 1:1 page tables */ bl cpu_do_resume // PC relative jump, MMU off + /* Can't access these by physical address once the MMU is on */ + ldp x19, x20, [x29, #16] + ldp x21, x22, [x29, #32] + ldp x23, x24, [x29, #48] + ldp x25, x26, [x29, #64] + ldp x27, x28, [x29, #80] + ldp x29, lr, [x29] b cpu_resume_mmu // Resume MMU, never returns ENDPROC(cpu_resume) diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 66055392f445..0d52ec972a38 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -10,22 +10,22 @@ #include #include -extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long)); + /* * This is called by __cpu_suspend_enter() to save the state, and do whatever * flushing is required to ensure that when the CPU goes to sleep we have * the necessary data available when the caches are not searched. * - * ptr: CPU context virtual address + * ptr: sleep_stack_data containing cpu state virtual address. * save_ptr: address of the location where the context physical address * must be saved */ -void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, +void notrace __cpu_suspend_save(struct sleep_stack_data *ptr, phys_addr_t *save_ptr) { *save_ptr = virt_to_phys(ptr); - cpu_do_suspend(ptr); + cpu_do_suspend(&ptr->system_regs); /* * Only flush the context that must be retrieved with the MMU * off. VA primitives ensure the flush is applied to all @@ -51,6 +51,30 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) hw_breakpoint_restore = hw_bp_restore; } +void notrace __cpu_suspend_exit(void) +{ + /* + * We are resuming from reset with the idmap active in TTBR0_EL1. + * We must uninstall the idmap and restore the expected MMU + * state before we can possibly return to userspace. + */ + cpu_uninstall_idmap(); + + /* + * Restore per-cpu offset before any kernel + * subsystem relying on it has a chance to run. + */ + set_my_cpu_offset(per_cpu_offset(smp_processor_id())); + + /* + * Restore HW breakpoint registers to sane values + * before debug exceptions are possibly reenabled + * through local_dbg_restore. 
+ */ + if (hw_breakpoint_restore) + hw_breakpoint_restore(NULL); +} + /* * cpu_suspend * @@ -60,8 +84,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) */ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { - int ret; + int ret = 0; unsigned long flags; + struct sleep_stack_data state; /* * From this point debug exceptions are disabled to prevent @@ -77,34 +102,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) */ pause_graph_tracing(); - /* - * mm context saved on the stack, it will be restored when - * the cpu comes out of reset through the identity mapped - * page tables, so that the thread address space is properly - * set-up on function return. - */ - ret = __cpu_suspend_enter(arg, fn); - if (ret == 0) { - /* - * We are resuming from reset with the idmap active in TTBR0_EL1. - * We must uninstall the idmap and restore the expected MMU - * state before we can possibly return to userspace. - */ - cpu_uninstall_idmap(); + if (__cpu_suspend_enter(&state)) { + /* Call the suspend finisher */ + ret = fn(arg); /* - * Restore per-cpu offset before any kernel - * subsystem relying on it has a chance to run. + * Never gets here, unless the suspend finisher fails. + * Successful cpu_suspend() should return from cpu_resume(), + * returning through this code path is considered an error + * If the return value is set to 0 force ret = -EOPNOTSUPP + * to make sure a proper error condition is propagated */ - set_my_cpu_offset(per_cpu_offset(smp_processor_id())); - - /* - * Restore HW breakpoint registers to sane values - * before debug exceptions are possibly reenabled - * through local_dbg_restore. - */ - if (hw_breakpoint_restore) - hw_breakpoint_restore(NULL); + if (!ret) + ret = -EOPNOTSUPP; + } else { + __cpu_suspend_exit(); } unpause_graph_tracing(); From 3b5f4eb5962e4fe312327850eeaa4f5fc662ec70 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 27 Apr 2016 17:47:07 +0100 Subject: [PATCH 55/78] arm64: Change cpu_resume() to enable mmu early then access sleep_sp by va By enabling the MMU early in cpu_resume(), the sleep_save_sp and stack can be accessed by VA, which avoids the need to convert-addresses and clean to PoC on the suspend path. MMU setup is shared with the boot path, meaning the swapper_pg_dir is restored directly: ttbr1_el1 is no longer saved/restored. struct sleep_save_sp is removed, replacing it with a single array of pointers. cpu_do_{suspend,resume} could be further reduced to not restore: cpacr_el1, mdscr_el1, tcr_el1, vbar_el1 and sctlr_el1, all of which are set by __cpu_setup(). However these values all contain res0 bits that may be used to enable future features. 
Signed-off-by: James Morse Reviewed-by: Lorenzo Pieralisi Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon (cherry picked from commit cabe1c81ea5be983425d117912d7883e252a3b09) Signed-off-by: Alex Shi Conflicts: arch/arm64/kernel/head.S remove KASAN change in arch/arm64/kernel/sleep.S --- arch/arm64/include/asm/suspend.h | 9 ++--- arch/arm64/kernel/asm-offsets.c | 3 -- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/setup.c | 1 - arch/arm64/kernel/sleep.S | 57 ++++++++++++-------------------- arch/arm64/kernel/suspend.c | 38 ++++----------------- arch/arm64/mm/proc.S | 53 ++++++++++++----------------- 7 files changed, 52 insertions(+), 111 deletions(-) diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 365d8cdd0073..29d3c71433e1 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -1,7 +1,7 @@ #ifndef __ASM_SUSPEND_H #define __ASM_SUSPEND_H -#define NR_CTX_REGS 11 +#define NR_CTX_REGS 10 #define NR_CALLEE_SAVED_REGS 12 /* @@ -17,11 +17,6 @@ struct cpu_suspend_ctx { u64 sp; } __aligned(16); -struct sleep_save_sp { - phys_addr_t *save_ptr_stash; - phys_addr_t save_ptr_stash_phys; -}; - /* * Memory to save the cpu state is allocated on the stack by * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter(). @@ -39,6 +34,8 @@ struct sleep_stack_data { unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS]; }; +extern unsigned long *sleep_save_stash; + extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); int __cpu_suspend_enter(struct sleep_stack_data *state); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 2abd57fd4255..52b4c8c2be45 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -132,9 +132,6 @@ int main(void) DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); - DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); - DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); - DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs)); DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs)); #endif diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7d095f69bbb6..977913e6f8a9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -627,7 +627,7 @@ ENDPROC(__secondary_switched) * If it isn't, park the CPU */ .section ".idmap.text", "ax" -__enable_mmu: +ENTRY(__enable_mmu) mrs x1, ID_AA64MMFR0_EL1 ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index cfed56f0ad26..ad290601f4da 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -175,7 +175,6 @@ static void __init smp_build_mpidr_hash(void) */ if (mpidr_hash_size() > 4 * num_possible_cpus()) pr_warn("Large number of MPIDR hash buckets detected\n"); - __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); } static void __init setup_machine_fdt(phys_addr_t dt_phys) diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index c1c28c0c2a7d..b7d8f814caaf 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -73,8 +73,8 @@ ENTRY(__cpu_suspend_enter) str x2, [x0, 
#SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]

 	/* find the mpidr_hash */
-	ldr	x1, =sleep_save_sp
-	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
+	ldr	x1, =sleep_save_stash
+	ldr	x1, [x1]
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -87,40 +87,27 @@ ENTRY(__cpu_suspend_enter)
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
+	str	x0, [x1]
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	stp	x29, lr, [sp, #-16]!
-	bl	__cpu_suspend_save
+	bl	cpu_do_suspend
 	ldp	x29, lr, [sp], #16
 	mov	x0, #1
 	ret
ENDPROC(__cpu_suspend_enter)
 	.ltorg

-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
-	.pushsection	".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
-	ldr	x3, =cpu_resume_after_mmu
-	msr	sctlr_el1, x0		// restore sctlr_el1
-	isb
-	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
-	 */
-	ic	iallu
-	dsb	nsh
-	isb
-	br	x3			// global jump to virtual address
-ENDPROC(cpu_resume_mmu)
-	.popsection
-cpu_resume_after_mmu:
-	mov	x0, #0			// return zero on success
-	ret
-ENDPROC(cpu_resume_after_mmu)
-
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
+	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adrp	x25, idmap_pg_dir
+	adrp	x26, swapper_pg_dir
+	b	__cpu_setup
+ENDPROC(cpu_resume)
+
+ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add	x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -130,26 +117,24 @@ ENTRY(cpu_resume)
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
 	/* x7 contains hash index, let's use it to grab context pointer */
-	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+	ldr_l	x0, sleep_save_stash
 	ldr	x0, [x0, x7, lsl #3]
 	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
 	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
-	/* load physical address of identity map page table in x1 */
-	adrp	x1, idmap_pg_dir
 	mov	sp, x2
 	/*
-	 * cpu_do_resume expects x0 to contain context physical address
-	 * pointer and x1 to contain physical address of 1:1 page tables
+	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
-	bl	cpu_do_resume		// PC relative jump, MMU off
-	/* Can't access these by physical address once the MMU is on */
+	bl	cpu_do_resume
+
 	ldp	x19, x20, [x29, #16]
 	ldp	x21, x22, [x29, #32]
 	ldp	x23, x24, [x29, #48]
 	ldp	x25, x26, [x29, #64]
 	ldp	x27, x28, [x29, #80]
 	ldp	x29, lr, [x29]
-	b	cpu_resume_mmu		// Resume MMU, never returns
-ENDPROC(cpu_resume)
+	mov	x0, #0
+	ret
+ENDPROC(_cpu_resume)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 0d52ec972a38..b616e365cee3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -10,30 +10,11 @@
 #include
 #include

-
 /*
- * This is called by __cpu_suspend_enter() to save the state, and do whatever
- * flushing is required to ensure that when the CPU goes to sleep we have
- * the necessary data available when the caches are not searched.
- *
- * ptr: sleep_stack_data containing cpu state virtual address.
- * save_ptr: address of the location where the context physical address
- *           must be saved
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
*/ -void notrace __cpu_suspend_save(struct sleep_stack_data *ptr, - phys_addr_t *save_ptr) -{ - *save_ptr = virt_to_phys(ptr); - - cpu_do_suspend(&ptr->system_regs); - /* - * Only flush the context that must be retrieved with the MMU - * off. VA primitives ensure the flush is applied to all - * cache levels so context is pushed to DRAM. - */ - __flush_dcache_area(ptr, sizeof(*ptr)); - __flush_dcache_area(save_ptr, sizeof(*save_ptr)); -} +unsigned long *sleep_save_stash; /* * This hook is provided so that cpu_suspend code can restore HW @@ -131,22 +112,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return ret; } -struct sleep_save_sp sleep_save_sp; - static int __init cpu_suspend_init(void) { - void *ctx_ptr; - /* ctx_ptr is an array of physical addresses */ - ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); + sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash), + GFP_KERNEL); - if (WARN_ON(!ctx_ptr)) + if (WARN_ON(!sleep_save_stash)) return -ENOMEM; - sleep_save_sp.save_ptr_stash = ctx_ptr; - sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); - __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); - return 0; } early_initcall(cpu_suspend_init); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index b6a141fd0386..5bb61de23201 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -63,62 +64,50 @@ ENTRY(cpu_do_suspend) mrs x2, tpidr_el0 mrs x3, tpidrro_el0 mrs x4, contextidr_el1 - mrs x5, mair_el1 - mrs x6, cpacr_el1 - mrs x7, ttbr1_el1 - mrs x8, tcr_el1 - mrs x9, vbar_el1 - mrs x10, mdscr_el1 - mrs x11, oslsr_el1 - mrs x12, sctlr_el1 + mrs x5, cpacr_el1 + mrs x6, tcr_el1 + mrs x7, vbar_el1 + mrs x8, mdscr_el1 + mrs x9, oslsr_el1 + mrs x10, sctlr_el1 stp x2, x3, [x0] - stp x4, x5, [x0, #16] - stp x6, x7, [x0, #32] - stp x8, x9, [x0, #48] - stp x10, x11, [x0, #64] - str x12, [x0, #80] + stp x4, xzr, [x0, #16] + stp x5, x6, [x0, #32] + stp x7, x8, [x0, #48] + stp x9, x10, [x0, #64] ret ENDPROC(cpu_do_suspend) /** * cpu_do_resume - restore CPU register context * - * x0: Physical address of context pointer - * x1: ttbr0_el1 to be restored - * - * Returns: - * sctlr_el1 value in x0 + * x0: Address of context pointer */ ENTRY(cpu_do_resume) - /* - * Invalidate local tlb entries before turning on MMU - */ - tlbi vmalle1 ldp x2, x3, [x0] ldp x4, x5, [x0, #16] - ldp x6, x7, [x0, #32] - ldp x8, x9, [x0, #48] - ldp x10, x11, [x0, #64] - ldr x12, [x0, #80] + ldp x6, x8, [x0, #32] + ldp x9, x10, [x0, #48] + ldp x11, x12, [x0, #64] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 - msr mair_el1, x5 msr cpacr_el1, x6 - msr ttbr0_el1, x1 - msr ttbr1_el1, x7 - tcr_set_idmap_t0sz x8, x7 + + /* Don't change t0sz here, mask those bits when restoring */ + mrs x5, tcr_el1 + bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + msr tcr_el1, x8 msr vbar_el1, x9 msr mdscr_el1, x10 + msr sctlr_el1, x12 /* * Restore oslsr_el1 by writing oslar_el1 */ ubfx x11, x11, #1, #1 msr oslar_el1, x11 reset_pmuserenr_el0 x0 // Disable PMU access from EL0 - mov x0, x12 - dsb nsh // Make sure local tlb invalidation completed isb ret ENDPROC(cpu_do_resume) From c193591874c3b92c96ea47218ce5c6ee8fd23d1e Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 27 Apr 2016 17:47:08 +0100 Subject: [PATCH 56/78] arm64: kernel: Include _AC definition in page.h page.h uses '_AC' in the definition of PAGE_SIZE, but doesn't include linux/const.h where this is defined. 
This produces build warnings when only asm/page.h is included by asm code.
(In C, _AC(X, Y) expands to X##Y, while in assembly it expands to plain X;
without linux/const.h neither expansion is available.)

Signed-off-by: James Morse
Acked-by: Mark Rutland
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
(cherry picked from commit 812264550dcba6cdbe84bfac2f27e7d23b5b8733)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/page.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 9b2f5a9d019d..fbafd0ad16df 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H

+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together */
 #ifdef CONFIG_ARM64_64K_PAGES

From 5a13294e6edc1ab1816a0c8e7fa5b930d33871da Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 27 Apr 2016 17:47:09 +0100
Subject: [PATCH 57/78] arm64: Promote KERNEL_START/KERNEL_END definitions to a header file

KERNEL_START and KERNEL_END are useful outside head.S; move them to a
header file.

Signed-off-by: James Morse
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
(cherry picked from commit 28c7258330ee4ce701a4da7af96d6605d1a0b3bd)
Signed-off-by: Alex Shi
---
 arch/arm64/include/asm/memory.h | 3 +++
 arch/arm64/kernel/head.S | 3 ---
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 853953cd1f08..5773a6629f10 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -70,6 +70,9 @@
 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))

+#define KERNEL_START _text
+#define KERNEL_END _end
+
 /*
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 977913e6f8a9..79cf6059ea50 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -48,9 +48,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif

-#define KERNEL_START _text
-#define KERNEL_END _end
-
 /*
  * Kernel startup entry point.
  * ---------------------------

From 5249f3c1e453fc3a1661976e94500da09f2cdcf1 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Wed, 27 Apr 2016 17:47:10 +0100
Subject: [PATCH 58/78] arm64: Add new asm macro copy_page

Kexec and hibernate need to copy pages of memory, but may not have all
of the kernel mapped, and are unable to call copy_page(). Add a
simplistic copy_page() macro that can be inlined in these situations.
lib/copy_page.S provides a bigger, better version, but it uses more
registers.
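[Note: functionally the macro is a page copy in 64-byte chunks. A rough
C analogue, illustrative only; the real macro below uses four ldp/stnp
pairs per iteration, the non-temporal stnp stores hinting that the
destination need not be allocated into the cache:]

	#include <string.h>

	static void copy_page_sketch(char *dest, const char *src,
				     unsigned long page_size)
	{
		unsigned long off;

		for (off = 0; off < page_size; off += 64)
			memcpy(dest + off, src + off, 64);
	}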
Signed-off-by: Geoff Levand [Changed asm label to 9998, added commit message] Signed-off-by: James Morse Acked-by: Catalin Marinas Signed-off-by: Will Deacon (cherry picked from commit 5003dbde45961dd7ab3d8a09ab9ad8bcb604db40) Signed-off-by: Alex Shi --- arch/arm64/include/asm/assembler.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 55fab0a827d9..3c6d0df4b6bb 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -24,6 +24,7 @@ #define __ASM_ASSEMBLER_H #include +#include #include #include #include @@ -273,6 +274,24 @@ lr .req x30 // link register 9000: .endm +/* + * copy_page - copy src to dest using temp registers t1-t8 + */ + .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req +9998: ldp \t1, \t2, [\src] + ldp \t3, \t4, [\src, #16] + ldp \t5, \t6, [\src, #32] + ldp \t7, \t8, [\src, #48] + add \src, \src, #64 + stnp \t1, \t2, [\dest] + stnp \t3, \t4, [\dest, #16] + stnp \t5, \t6, [\dest, #32] + stnp \t7, \t8, [\dest, #48] + add \dest, \dest, #64 + tst \src, #(PAGE_SIZE - 1) + b.ne 9998b + .endm + /* * Annotate a function as position independent, i.e., safe to be called before * the kernel virtual mapping is activated. From 4d4fe8f2b27ceb29c16b32c3882ebd19bf21800f Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 27 Apr 2016 17:47:11 +0100 Subject: [PATCH 59/78] PM / Hibernate: Call flush_icache_range() on pages restored in-place Some architectures require code written to memory as if it were data to be 'cleaned' from any data caches before the processor can fetch them as new instructions. During resume from hibernate, the snapshot code copies some pages directly, meaning these architectures do not get a chance to perform their cache maintenance. Modify the read and decompress code to call flush_icache_range() on all pages that are restored, so that the restored in-place pages are guaranteed to be executable on these architectures. Signed-off-by: James Morse Acked-by: Pavel Machek Acked-by: Rafael J. Wysocki Acked-by: Catalin Marinas [will: make clean_pages_on_* static and remove initialisers] Signed-off-by: Will Deacon (cherry picked from commit f6cf0545ec697ddc278b7457b7d0c0d86a2ea88e) Signed-off-by: Alex Shi --- kernel/power/swap.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 12cd989dadf6..160e1006640d 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -36,6 +36,14 @@ #define HIBERNATE_SIG "S1SUSPEND" +/* + * When reading an {un,}compressed image, we may restore pages in place, + * in which case some architectures need these pages cleaning before they + * can be executed. We don't know which pages these may be, so clean the lot. + */ +static bool clean_pages_on_read; +static bool clean_pages_on_decompress; + /* * The swap map is a data structure used for keeping track of each page * written to a swap partition. 
It consists of many swap_map_page @@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio) if (bio_data_dir(bio) == WRITE) put_page(page); + else if (clean_pages_on_read) + flush_icache_range((unsigned long)page_address(page), + (unsigned long)page_address(page) + PAGE_SIZE); if (bio->bi_error && !hb->error) hb->error = bio->bi_error; @@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle, hib_init_batch(&hb); + clean_pages_on_read = true; printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n", nr_to_read); m = nr_to_read / 10; @@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data) d->unc_len = LZO_UNC_SIZE; d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, d->unc, &d->unc_len); + if (clean_pages_on_decompress) + flush_icache_range((unsigned long)d->unc, + (unsigned long)d->unc + d->unc_len); + atomic_set(&d->stop, 1); wake_up(&d->done); } @@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle, } memset(crc, 0, offsetof(struct crc_data, go)); + clean_pages_on_decompress = true; + /* * Start the decompression threads. */ From 03f3f03d454da21b61d6a18575d909b035fa80b2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:03 +0000 Subject: [PATCH 60/78] arm64: mm: move pte_* macros For pmd, pud, and pgd levels of table, functions including p?d_index and p?d_offset are defined after the p?d_page_vaddr function for the immediately higher level of table. The pte functions however are defined much earlier, even though several rely on the later definition of pmd_page_vaddr. While this isn't currently a problem as these are macros, it prevents the logical grouping of later C functions (which cannot rely on prototypes for functions not yet defined). Move these definitions after pmd_page_vaddr, for consistency with the placement of these functions for other levels of table. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 053520f7d3923cc6d37afb28f9887cb1e7d77454) Signed-off-by: Alex Shi --- arch/arm64/include/asm/pgtable.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index a11053af0537..0dba915f5080 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -136,16 +136,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) -/* Find an entry in the third-level page table. */ -#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) - -#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) - -#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - /* * The following only work if pte_present(). Undefined behaviour otherwise. */ @@ -430,6 +420,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); } +/* Find an entry in the third-level page table. 
*/ +#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + +#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) /* From 3169fc9c6154b1c8672a0c2a6fb7932e23653722 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:04 +0000 Subject: [PATCH 61/78] arm64: mm: add functions to walk page tables by PA To allow us to walk tables allocated into the fixmap, we need to acquire the physical address of a page, rather than the virtual address in the linear map. This patch adds new p??_page_paddr and p??_offset_phys functions to acquire the physical address of a next-level table, and changes p??_offset* into macros which simply convert this to a linear map VA. This renders p??_page_vaddr unused, and hence they are removed. At the pgd level, a new pgd_offset_raw function is added to find the relevant PGD entry given the base of a PGD and a virtual address. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit dca56dca7124709f3dfca81afe61b4d98eb9cacf) Signed-off-by: Alex Shi --- arch/arm64/include/asm/pgtable.h | 39 +++++++++++++++++++------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0dba915f5080..774854e31404 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -415,15 +415,16 @@ static inline void pmd_clear(pmd_t *pmdp) set_pmd(pmdp, __pmd(0)); } -static inline pte_t *pmd_page_vaddr(pmd_t pmd) +static inline phys_addr_t pmd_page_paddr(pmd_t pmd) { - return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); + return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK; } /* Find an entry in the third-level page table. */ #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) +#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t)) +#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) @@ -458,21 +459,23 @@ static inline void pud_clear(pud_t *pudp) set_pud(pudp, __pud(0)); } -static inline pmd_t *pud_page_vaddr(pud_t pud) +static inline phys_addr_t pud_page_paddr(pud_t pud) { - return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); + return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK; } /* Find an entry in the second-level page table. 
*/
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

 #define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

+#else
+
+#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
+
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */

 #if CONFIG_PGTABLE_LEVELS > 3
@@ -494,21 +497,23 @@ static inline void pgd_clear(pgd_t *pgdp)
 	set_pgd(pgdp, __pgd(0));
 }

-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
 }

 /* Find an entry in the frst-level page table. */
 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))

 #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

+#else
+
+#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
+
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */

 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -516,7 +521,9 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

-#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))

 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

From a584a909a96b66b9b43a1a628b96bca630d2589b Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 27 Apr 2016 17:47:12 +0100
Subject: [PATCH 62/78] arm64: kernel: Add support for hibernate/suspend-to-disk

Add support for hibernate/suspend-to-disk.

Suspend borrows code from cpu_suspend() to write cpu state onto the
stack, before calling swsusp_save() to save the memory image.

Restore creates a set of temporary page tables, covering only the
linear map, copies the restore code to a 'safe' page, then uses the
copy to restore the memory image. The copied code executes in the lower
half of the address space, and once complete, restores the original
kernel's page tables. It then calls into cpu_resume(), and follows the
normal cpu_suspend() path back into the suspend code.

To restore a kernel using KASLR, the address of the page tables and
cpu_resume() are stored in the hibernate arch-header, and the el2
vectors are pivoted via the 'safe' page in low memory.
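[Note: one detail worth calling out is that the restore path switches
page tables break-before-make, parking TTBR1 on a zero page while the
old TLB entries are invalidated, so stale and new walks are never
visible together. A hedged C rendering of the
break_before_make_ttbr_switch assembler macro defined in
hibernate-asm.S below:]

	static inline void bbm_switch_ttbr1(unsigned long zero_page_pa,
					    unsigned long pgd_pa)
	{
		asm volatile(
		"	msr	ttbr1_el1, %0\n"	/* park on the zero page */
		"	isb\n"
		"	tlbi	vmalle1is\n"		/* invalidate old walks */
		"	dsb	ish\n"
		"	msr	ttbr1_el1, %1\n"	/* install the new tables */
		"	isb"
		: : "r" (zero_page_pa), "r" (pgd_pa) : "memory");
	}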
Reviewed-by: Catalin Marinas Tested-by: Kevin Hilman # Tested on Juno R2 Signed-off-by: James Morse Signed-off-by: Will Deacon (cherry picked from commit 82869ac57b5d3b550446932c918dbf2caf020c9e) Signed-off-by: Alex Shi Conflicts: arch/arm64/kernel/Makefile --- arch/arm64/Kconfig | 8 + arch/arm64/include/asm/suspend.h | 7 + arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/asm-offsets.c | 5 + arch/arm64/kernel/hibernate-asm.S | 176 ++++++++++++ arch/arm64/kernel/hibernate.c | 461 ++++++++++++++++++++++++++++++ arch/arm64/kernel/vmlinux.lds.S | 15 + 7 files changed, 673 insertions(+) create mode 100644 arch/arm64/kernel/hibernate-asm.S create mode 100644 arch/arm64/kernel/hibernate.c diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7107dc965f32..fd5cb5074410 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -814,6 +814,14 @@ menu "Power management options" source "kernel/power/Kconfig" +config ARCH_HIBERNATION_POSSIBLE + def_bool y + depends on CPU_PM + +config ARCH_HIBERNATION_HEADER + def_bool y + depends on HIBERNATION + config ARCH_SUSPEND_POSSIBLE def_bool y diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 29d3c71433e1..024d623f662e 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -40,4 +40,11 @@ extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); extern void cpu_resume(void); int __cpu_suspend_enter(struct sleep_stack_data *state); void __cpu_suspend_exit(void); +void _cpu_resume(void); + +int swsusp_arch_suspend(void); +int swsusp_arch_resume(void); +int arch_hibernation_header_save(void *addr, unsigned int max_size); +int arch_hibernation_header_restore(void *addr); + #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 85f6924d6394..8867ff71a4b7 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -40,6 +40,7 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o arm64-obj-$(CONFIG_ACPI) += acpi.o +arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 52b4c8c2be45..2bb17bd556f8 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -137,5 +138,9 @@ int main(void) #endif DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0)); DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); + BLANK(); + DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); + DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); + DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); return 0; } diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S new file mode 100644 index 000000000000..46f29b6560ec --- /dev/null +++ b/arch/arm64/kernel/hibernate-asm.S @@ -0,0 +1,176 @@ +/* + * Hibernate low-level support + * + * Copyright (C) 2016 ARM Ltd. + * Author: James Morse + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * To prevent the possibility of old and new partial table walks being visible + * in the tlb, switch the ttbr to a zero page when we invalidate the old + * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i + * Even switching to our copied tables will cause a changed output address at + * each stage of the walk. + */ +.macro break_before_make_ttbr_switch zero_page, page_table + msr ttbr1_el1, \zero_page + isb + tlbi vmalle1is + dsb ish + msr ttbr1_el1, \page_table + isb +.endm + + +/* + * Resume from hibernate + * + * Loads temporary page tables then restores the memory image. + * Finally branches to cpu_resume() to restore the state saved by + * swsusp_arch_suspend(). + * + * Because this code has to be copied to a 'safe' page, it can't call out to + * other functions by PC-relative address. Also remember that it may be + * mid-way through over-writing other functions. For this reason it contains + * code from flush_icache_range() and uses the copy_page() macro. + * + * This 'safe' page is mapped via ttbr0, and executed from there. This function + * switches to a copy of the linear map in ttbr1, performs the restore, then + * switches ttbr1 to the original kernel's swapper_pg_dir. + * + * All of memory gets written to, including code. We need to clean the kernel + * text to the Point of Coherence (PoC) before secondary cores can be booted. + * Because the kernel modules and executable pages mapped to user space are + * also written as data, we clean all pages we touch to the Point of + * Unification (PoU). + * + * x0: physical address of temporary page tables + * x1: physical address of swapper page tables + * x2: address of cpu_resume + * x3: linear map address of restore_pblist in the current kernel + * x4: physical address of __hyp_stub_vectors, or 0 + * x5: physical address of a zero page that remains zero after resume + */ +.pushsection ".hibernate_exit.text", "ax" +ENTRY(swsusp_arch_suspend_exit) + /* + * We execute from ttbr0, change ttbr1 to our copied linear map tables + * with a break-before-make via the zero page + */ + break_before_make_ttbr_switch x5, x0 + + mov x21, x1 + mov x30, x2 + mov x24, x4 + mov x25, x5 + + /* walk the restore_pblist and use copy_page() to over-write memory */ + mov x19, x3 + +1: ldr x10, [x19, #HIBERN_PBE_ORIG] + mov x0, x10 + ldr x1, [x19, #HIBERN_PBE_ADDR] + + copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 + + add x1, x10, #PAGE_SIZE + /* Clean the copied page to PoU - based on flush_icache_range() */ + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x10, x3 +2: dc cvau, x4 /* clean D line / unified line */ + add x4, x4, x2 + cmp x4, x1 + b.lo 2b + + ldr x19, [x19, #HIBERN_PBE_NEXT] + cbnz x19, 1b + dsb ish /* wait for PoU cleaning to finish */ + + /* switch to the restored kernels page tables */ + break_before_make_ttbr_switch x25, x21 + + ic ialluis + dsb ish + isb + + cbz x24, 3f /* Do we need to re-initialise EL2? */ + hvc #0 +3: ret + + .ltorg +ENDPROC(swsusp_arch_suspend_exit) + +/* + * Restore the hyp stub. 
+ * This must be done before the hibernate page is unmapped by _cpu_resume(),
+ * but happens before any of the hyp-stub's code is cleaned to PoC.
+ *
+ * x24: The physical address of __hyp_stub_vectors
+ */
+el1_sync:
+	msr	vbar_el2, x24
+	eret
+ENDPROC(el1_sync)
+
+.macro invalid_vector	label
+\label:
+	b \label
+ENDPROC(\label)
+.endm
+
+	invalid_vector	el2_sync_invalid
+	invalid_vector	el2_irq_invalid
+	invalid_vector	el2_fiq_invalid
+	invalid_vector	el2_error_invalid
+	invalid_vector	el1_sync_invalid
+	invalid_vector	el1_irq_invalid
+	invalid_vector	el1_fiq_invalid
+	invalid_vector	el1_error_invalid
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+	.align 11
+ENTRY(hibernate_el2_vectors)
+	ventry	el2_sync_invalid	// Synchronous EL2t
+	ventry	el2_irq_invalid		// IRQ EL2t
+	ventry	el2_fiq_invalid		// FIQ EL2t
+	ventry	el2_error_invalid	// Error EL2t
+
+	ventry	el2_sync_invalid	// Synchronous EL2h
+	ventry	el2_irq_invalid		// IRQ EL2h
+	ventry	el2_fiq_invalid		// FIQ EL2h
+	ventry	el2_error_invalid	// Error EL2h
+
+	ventry	el1_sync		// Synchronous 64-bit EL1
+	ventry	el1_irq_invalid		// IRQ 64-bit EL1
+	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
+	ventry	el1_error_invalid	// Error 64-bit EL1
+
+	ventry	el1_sync_invalid	// Synchronous 32-bit EL1
+	ventry	el1_irq_invalid		// IRQ 32-bit EL1
+	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
+	ventry	el1_error_invalid	// Error 32-bit EL1
+END(hibernate_el2_vectors)
+
+.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
new file mode 100644
index 000000000000..7e16fb327644
--- /dev/null
+++ b/arch/arm64/kernel/hibernate.c
@@ -0,0 +1,461 @@
+/*:
+ * Hibernate support specific for ARM64
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ *  https://lkml.org/lkml/2010/6/18/4
+ *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *  https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(x) "hibernate: " x
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Hibernate core relies on this value being 0 on resume, and marks it
+ * __nosavedata assuming it will keep the resume kernel's '0' value. This
+ * doesn't happen with KASLR.
+ *
+ * defined as "__visible int in_suspend __nosavedata" in
+ * kernel/power/hibernate.c
+ */
+extern int in_suspend;
+
+/* Find a symbol's alias in the linear map */
+#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
+
+/* Do we need to reset el2? */
+#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
+
+/*
+ * Start/end of the hibernate exit code; this must be copied to a 'safe'
+ * location in memory, and executed from there.
+ */
+extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
+
+/* temporary el2 vectors in the __hibernate_exit_text section. */
+extern char hibernate_el2_vectors[];
+
+/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
+extern char __hyp_stub_vectors[];
+
+/*
+ * Values that may not change over hibernate/resume. We put the build number
We put the build number
+ * and date in here so that we guarantee not to resume with a different
+ * kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+	char		uts_version[__NEW_UTS_LEN + 1];
+};
+
+/* These values need to be known across a hibernate/restore. */
+static struct arch_hibernate_hdr {
+	struct arch_hibernate_hdr_invariants invariants;
+
+	/* These are needed to find the relocated kernel if built with kaslr */
+	phys_addr_t	ttbr1_el1;
+	void		(*reenter_kernel)(void);
+
+	/*
+	 * We need to know where the __hyp_stub_vectors are after restore to
+	 * re-configure el2.
+	 */
+	phys_addr_t	__hyp_stub_vectors;
+} resume_hdr;
+
+static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+	memset(i, 0, sizeof(*i));
+	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
+	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+
+	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+	struct arch_hibernate_hdr *hdr = addr;
+
+	if (max_size < sizeof(*hdr))
+		return -EOVERFLOW;
+
+	arch_hdr_invariants(&hdr->invariants);
+	hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
+	hdr->reenter_kernel = _cpu_resume;
+
+	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
+	if (el2_reset_needed())
+		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+	else
+		hdr->__hyp_stub_vectors = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_save);
+
+int arch_hibernation_header_restore(void *addr)
+{
+	struct arch_hibernate_hdr_invariants invariants;
+	struct arch_hibernate_hdr *hdr = addr;
+
+	arch_hdr_invariants(&invariants);
+	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+		pr_crit("Hibernate image not generated by this kernel!\n");
+		return -EINVAL;
+	}
+
+	resume_hdr = *hdr;
+
+	return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_restore);
+
+/*
+ * Copies length bytes, starting at src_start, into a new page, performs
+ * cache maintenance, then maps the page at the specified low address as
+ * executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */ +static int create_safe_exec_page(void *src_start, size_t length, + unsigned long dst_addr, + phys_addr_t *phys_dst_addr, + void *(*allocator)(gfp_t mask), + gfp_t mask) +{ + int rc = 0; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned long dst = (unsigned long)allocator(mask); + + if (!dst) { + rc = -ENOMEM; + goto out; + } + + memcpy((void *)dst, src_start, length); + flush_icache_range(dst, dst + length); + + pgd = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(*pgd)) { + pud = allocator(mask); + if (!pud) { + rc = -ENOMEM; + goto out; + } + pgd_populate(&init_mm, pgd, pud); + } + + pud = pud_offset(pgd, dst_addr); + if (pud_none(*pud)) { + pmd = allocator(mask); + if (!pmd) { + rc = -ENOMEM; + goto out; + } + pud_populate(&init_mm, pud, pmd); + } + + pmd = pmd_offset(pud, dst_addr); + if (pmd_none(*pmd)) { + pte = allocator(mask); + if (!pte) { + rc = -ENOMEM; + goto out; + } + pmd_populate_kernel(&init_mm, pmd, pte); + } + + pte = pte_offset_kernel(pmd, dst_addr); + set_pte(pte, __pte(virt_to_phys((void *)dst) | + pgprot_val(PAGE_KERNEL_EXEC))); + + /* Load our new page tables */ + asm volatile("msr ttbr0_el1, %0;" + "isb;" + "tlbi vmalle1is;" + "dsb ish;" + "isb" : : "r"(virt_to_phys(pgd))); + + *phys_dst_addr = virt_to_phys((void *)dst); + +out: + return rc; +} + + +int swsusp_arch_suspend(void) +{ + int ret = 0; + unsigned long flags; + struct sleep_stack_data state; + + local_dbg_save(flags); + + if (__cpu_suspend_enter(&state)) { + ret = swsusp_save(); + } else { + /* Clean kernel to PoC for secondary core startup */ + __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START); + + /* + * Tell the hibernation core that we've just restored + * the memory + */ + in_suspend = 0; + + __cpu_suspend_exit(); + } + + local_dbg_restore(flags); + + return ret; +} + +static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, + unsigned long end) +{ + pte_t *src_pte; + pte_t *dst_pte; + unsigned long addr = start; + + dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pte) + return -ENOMEM; + pmd_populate_kernel(&init_mm, dst_pmd, dst_pte); + dst_pte = pte_offset_kernel(dst_pmd, start); + + src_pte = pte_offset_kernel(src_pmd, start); + do { + if (!pte_none(*src_pte)) + /* + * Resume will overwrite areas that may be marked + * read only (code, rodata). Clear the RDONLY bit from + * the temporary mappings we use during restore. 
+			 */
+			set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
+	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+	return 0;
+}
+
+static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+		    unsigned long end)
+{
+	pmd_t *src_pmd;
+	pmd_t *dst_pmd;
+	unsigned long next;
+	unsigned long addr = start;
+
+	if (pud_none(*dst_pud)) {
+		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmd)
+			return -ENOMEM;
+		pud_populate(&init_mm, dst_pud, dst_pmd);
+	}
+	dst_pmd = pmd_offset(dst_pud, start);
+
+	src_pmd = pmd_offset(src_pud, start);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(*src_pmd))
+			continue;
+		if (pmd_table(*src_pmd)) {
+			if (copy_pte(dst_pmd, src_pmd, addr, next))
+				return -ENOMEM;
+		} else {
+			set_pmd(dst_pmd,
+				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+		}
+	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
+
+	return 0;
+}
+
+static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+		    unsigned long end)
+{
+	pud_t *dst_pud;
+	pud_t *src_pud;
+	unsigned long next;
+	unsigned long addr = start;
+
+	if (pgd_none(*dst_pgd)) {
+		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pud)
+			return -ENOMEM;
+		pgd_populate(&init_mm, dst_pgd, dst_pud);
+	}
+	dst_pud = pud_offset(dst_pgd, start);
+
+	src_pud = pud_offset(src_pgd, start);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none(*src_pud))
+			continue;
+		if (pud_table(*(src_pud))) {
+			if (copy_pmd(dst_pud, src_pud, addr, next))
+				return -ENOMEM;
+		} else {
+			set_pud(dst_pud,
+				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+		}
+	} while (dst_pud++, src_pud++, addr = next, addr != end);
+
+	return 0;
+}
+
+static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+			    unsigned long end)
+{
+	unsigned long next;
+	unsigned long addr = start;
+	pgd_t *src_pgd = pgd_offset_k(start);
+
+	dst_pgd = pgd_offset_raw(dst_pgd, start);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(*src_pgd))
+			continue;
+		if (copy_pud(dst_pgd, src_pgd, addr, next))
+			return -ENOMEM;
+	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+
+	return 0;
+}
+
+/*
+ * Set up, then resume from, the hibernate image using
+ * swsusp_arch_suspend_exit().
+ *
+ * Memory allocated by get_safe_page() will be dealt with by the hibernate
+ * code; we don't need to free it here.
+ */
+int swsusp_arch_resume(void)
+{
+	int rc = 0;
+	void *zero_page;
+	size_t exit_size;
+	pgd_t *tmp_pg_dir;
+	void *lm_restore_pblist;
+	phys_addr_t phys_hibernate_exit;
+	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
+					  void *, phys_addr_t, phys_addr_t);
+
+	/*
+	 * Locate the exit code in the bottom-but-one page, so that *NULL
+	 * still has disastrous effects.
+	 */
+	hibernate_exit = (void *)PAGE_SIZE;
+	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
+	/*
+	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
+	 * a new set of ttbr0 page tables and load them.
+	 */
+	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+				   (unsigned long)hibernate_exit,
+				   &phys_hibernate_exit,
+				   (void *)get_safe_page, GFP_ATOMIC);
+	if (rc) {
+		pr_err("Failed to create safe executable page for hibernate_exit code.");
+		goto out;
+	}
+
+	/*
+	 * The hibernate exit text contains a set of el2 vectors that will
+	 * be executed at el2 with the mmu off in order to reload hyp-stub.
+	 */
+	__flush_dcache_area(hibernate_exit, exit_size);
+
+	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when + * restoring. + */ + tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); + if (!tmp_pg_dir) { + pr_err("Failed to allocate memory for temporary page tables."); + rc = -ENOMEM; + goto out; + } + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); + if (rc) + goto out; + + /* + * Since we only copied the linear map, we need to find restore_pblist's + * linear map address. + */ + lm_restore_pblist = LMADDR(restore_pblist); + + /* + * KASLR will cause the el2 vectors to be in a different location in + * the resumed kernel. Load hibernate's temporary copy into el2. + * + * We can skip this step if we booted at EL1, or are running with VHE. + */ + if (el2_reset_needed()) { + phys_addr_t el2_vectors = phys_hibernate_exit; /* base */ + el2_vectors += hibernate_el2_vectors - + __hibernate_exit_text_start; /* offset */ + + __hyp_set_vectors(el2_vectors); + } + + /* + * We need a zero page that is zero before & after resume in order to + * to break before make on the ttbr1 page tables. + */ + zero_page = (void *)get_safe_page(GFP_ATOMIC); + + hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, + resume_hdr.reenter_kernel, lm_restore_pblist, + resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); + +out: + return rc; +} diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index f04c4762977b..e2d63ed9c361 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -46,6 +46,16 @@ jiffies = jiffies_64; *(.idmap.text) \ VMLINUX_SYMBOL(__idmap_text_end) = .; +#ifdef CONFIG_HIBERNATION +#define HIBERNATE_TEXT \ + . = ALIGN(SZ_4K); \ + VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\ + *(.hibernate_exit.text) \ + VMLINUX_SYMBOL(__hibernate_exit_text_end) = .; +#else +#define HIBERNATE_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -109,6 +119,7 @@ SECTIONS KPROBES_TEXT HYPERVISOR_TEXT IDMAP_TEXT + HIBERNATE_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -187,6 +198,10 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "HYP init code too big or misaligned") ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, "ID map text too big or misaligned") +#ifdef CONFIG_HIBERNATION +ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) + <= SZ_4K, "Hibernate exit text too big or misaligned") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. From 3eb846e0d5ebc563d111ea825e55ea1b731b1a8b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 2 Jan 2016 03:09:16 +0100 Subject: [PATCH 63/78] PM / sleep: Add support for read-only sysfs attributes Some sysfs attributes in /sys/power/ should really be read-only, so add support for that, convert those attributes to read-only and drop the stub .show() routines from them. Original-by: Sergey Senozhatsky Signed-off-by: Rafael J. Wysocki (cherry picked from commit a1e9ca6967d68209c70e616a224efa89a6b86ca6) Signed-off-by: Alex Shi --- kernel/power/main.c | 17 ++--------------- kernel/power/power.h | 9 +++++++++ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/kernel/power/main.c b/kernel/power/main.c index b2dd4d999900..27946975eff0 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -280,13 +280,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj, return pm_wakeup_irq ? 
sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
 }
 
-static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
-					struct kobj_attribute *attr,
-					const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-power_attr(pm_wakeup_irq);
+power_attr_ro(pm_wakeup_irq);
 
 #else /* !CONFIG_PM_SLEEP_DEBUG */
 static inline void pm_print_times_init(void) {}
@@ -564,14 +558,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
 	return show_trace_dev_match(buf, PAGE_SIZE);
 }
 
-static ssize_t
-pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
-			 const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-
-power_attr(pm_trace_dev_match);
+power_attr_ro(pm_trace_dev_match);
 
 #endif /* CONFIG_PM_TRACE */
 
diff --git a/kernel/power/power.h b/kernel/power/power.h
index caadb566e82b..efe1b3b17c88 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = {	\
 	.store	= _name##_store,		\
 }
 
+#define power_attr_ro(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = S_IRUGO,		\
+	},					\
+	.show	= _name##_show,			\
+}
+
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */

From 98b513ea2f654c223a508080bb0722a007fc16ac Mon Sep 17 00:00:00 2001
From: James Morse
Date: Wed, 27 Apr 2016 17:47:13 +0100
Subject: [PATCH 64/78] arm64: hibernate: Refuse to hibernate if the boot cpu is offline

Hibernation represents a system state save/restore through a system reboot; this implies that the logical cpus carrying out hibernation/thawing must be the same, so that the context saved in the snapshot image on hibernation is consistent with the state of the system on resume. If resume from hibernation is driven through a kernel command line parameter, the cpu responsible for thawing the system will be whichever CPU the firmware boots the system on at cold boot (ie logical cpu 0); this means that, in order to keep the system context consistent between the hibernate snapshot image and the system state on resume from hibernate, logical cpu 0 must be online when hibernation starts and must be the logical cpu that creates the snapshot image.

This patch adds a PM notifier that enforces that logical cpu 0 is online when hibernation is started (and prevents hibernation if it is not), which is sufficient to guarantee it will be the one creating the snapshot image, therefore providing the resume cpu with a consistent snapshot of the system to resume to.
Signed-off-by: James Morse
Acked-by: Lorenzo Pieralisi
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
(cherry picked from commit 1fe492ce6482b77807b25d29690a48c46456beee)
Signed-off-by: Alex Shi
---
 arch/arm64/kernel/hibernate.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 7e16fb327644..f8df75d740f4 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -17,6 +17,7 @@
 #define pr_fmt(x) "hibernate: " x
 #include
 #include
+#include
 #include
 #include
 #include
@@ -459,3 +460,28 @@ int swsusp_arch_resume(void)
 out:
 	return rc;
 }
+
+static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
+					     unsigned long action, void *ptr)
+{
+	if (action == PM_HIBERNATION_PREPARE &&
+	    cpumask_first(cpu_online_mask) != 0) {
+		pr_warn("CPU0 is offline.\n");
+		return notifier_from_errno(-ENODEV);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int __init check_boot_cpu_online_init(void)
+{
+	/*
+	 * Set this pm_notifier callback with a lower priority than
+	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
+	 * called earlier to disable cpu hotplug before the cpu online check.
+	 */
+	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
+
+	return 0;
+}
+core_initcall(check_boot_cpu_online_init);

From de93e05ca12346af8ebe2af155d21bb24cd98c94 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 25 Jan 2016 11:45:05 +0000
Subject: [PATCH 65/78] arm64: mm: avoid redundant __pa(__va(x))

When we "upgrade" to a section mapping, we free any table we made redundant by giving it back to memblock. To get the PA, we acquire the physical address and convert this to a VA, then subsequently convert this back to a PA.

This works currently, but will not work if the tables are not accessed via linear map VAs (e.g. if we use fixmap slots).

This patch uses {pmd,pud}_page_paddr to acquire the PA. This avoids the __pa(__va()) round trip, saving some work and avoiding reliance on the linear mapping.
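For illustration (a sketch, not part of the diff below), the change at each site amounts to reading the table's PA straight out of the entry instead of round-tripping it through a linear map VA:

	/* before: PA -> linear map VA -> PA */
	phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));

	/* after: extract the PA directly from the pmd entry */
	phys_addr_t table = pmd_page_paddr(old_pmd);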
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 316b39db06718d59d82736df9fc65cf05b467cc7) Signed-off-by: Alex Shi --- arch/arm64/mm/mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1e2ae807130f..e1240cfe5dcf 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -171,7 +171,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, if (!pmd_none(old_pmd)) { flush_tlb_all(); if (pmd_table(old_pmd)) { - phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0)); + phys_addr_t table = pmd_page_paddr(old_pmd); if (!WARN_ON_ONCE(slab_is_available())) memblock_free(table, PAGE_SIZE); } @@ -232,7 +232,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, if (!pud_none(old_pud)) { flush_tlb_all(); if (pud_table(old_pud)) { - phys_addr_t table = __pa(pmd_offset(&old_pud, 0)); + phys_addr_t table = pud_page_paddr(old_pud); if (!WARN_ON_ONCE(slab_is_available())) memblock_free(table, PAGE_SIZE); } From cae587548c55fc9cdc54d8e85950ae733cd10b37 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:06 +0000 Subject: [PATCH 66/78] arm64: mm: add __{pud,pgd}_populate We currently have __pmd_populate for creating a pmd table entry given the physical address of a pte, but don't have equivalents for the pud or pgd levels of table. To enable us to manipulate tables which are mapped outside of the linear mapping (where we have a PA, but not a linear map VA), it is useful to have these functions. This patch adds __{pud,pgd}_populate. As these should not be called when the kernel uses folded {pmd,pud}s, in these cases they expand to BUILD_BUG(). So long as the appropriate checks are made on the {pud,pgd} entry prior to attempting population, these should be optimized out at compile time. 
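As a sketch of the intended calling pattern (mirroring the alloc_init_pud() rework later in this series): callers test the entry before populating it, and on configurations with folded puds that test is constant-false, so the BUILD_BUG() stub is provably dead code and is compiled away:

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}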
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 1e531cce68c92b46c7d29f36a72f9a3e5886678f) Signed-off-by: Alex Shi --- arch/arm64/include/asm/pgalloc.h | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index c15053902942..ff98585d085a 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -42,11 +42,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) free_page((unsigned long)pmd); } -static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot) { - set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); + set_pud(pud, __pud(pmd | prot)); } +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE); +} +#else +static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot) +{ + BUILD_BUG(); +} #endif /* CONFIG_PGTABLE_LEVELS > 2 */ #if CONFIG_PGTABLE_LEVELS > 3 @@ -62,11 +71,20 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) free_page((unsigned long)pud); } -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot) { - set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE)); + set_pgd(pgdp, __pgd(pud | prot)); } +static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +{ + __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE); +} +#else +static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot) +{ + BUILD_BUG(); +} #endif /* CONFIG_PGTABLE_LEVELS > 3 */ extern pgd_t *pgd_alloc(struct mm_struct *mm); From 510fbd87a7a3cb2d7cbc81dfa10f9a02b6d301ef Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:07 +0000 Subject: [PATCH 67/78] arm64: mm: add functions to walk tables in fixmap As a preparatory step to allow us to allocate early page tables from unmapped memory using memblock_alloc, add new p??_{set,clear}_fixmap* functions which can be used to walk page tables outside of the linear mapping by using fixmap slots. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 961faac114819a01e627fe9c9c82b830bb3849d4) Signed-off-by: Alex Shi --- arch/arm64/include/asm/fixmap.h | 10 ++++++++++ arch/arm64/include/asm/pgtable.h | 26 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 309704544d22..1a617d46fce9 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -62,6 +62,16 @@ enum fixed_addresses { FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + + /* + * Used for kernel page table creation, so unmapped memory may be used + * for tables. 
+ */ + FIX_PTE, + FIX_PMD, + FIX_PUD, + FIX_PGD, + __end_of_fixed_addresses }; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 774854e31404..50341f006400 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -59,6 +59,7 @@ #ifndef __ASSEMBLY__ +#include #include extern void __pte_error(const char *file, int line, unsigned long val); @@ -431,6 +432,10 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) #define pte_unmap(pte) do { } while (0) #define pte_unmap_nested(pte) do { } while (0) +#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) +#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) +#define pte_clear_fixmap() clear_fixmap(FIX_PTE) + #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) /* @@ -470,12 +475,21 @@ static inline phys_addr_t pud_page_paddr(pud_t pud) #define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t)) #define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr)))) +#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) +#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr)) +#define pmd_clear_fixmap() clear_fixmap(FIX_PMD) + #define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK)) #else #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) +/* Match pmd_offset folding in */ +#define pmd_set_fixmap(addr) NULL +#define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp) +#define pmd_clear_fixmap() + #endif /* CONFIG_PGTABLE_LEVELS > 2 */ #if CONFIG_PGTABLE_LEVELS > 3 @@ -508,12 +522,21 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd) #define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t)) #define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr)))) +#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr)) +#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr)) +#define pud_clear_fixmap() clear_fixmap(FIX_PUD) + #define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK)) #else #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) +/* Match pud_offset folding in */ +#define pud_set_fixmap(addr) NULL +#define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) +#define pud_clear_fixmap() + #endif /* CONFIG_PGTABLE_LEVELS > 3 */ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) @@ -528,6 +551,9 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd) /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) +#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) +#define pgd_clear_fixmap() clear_fixmap(FIX_PGD) + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | From af72fa120efb767e5196e02efac3b67b69e20089 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:08 +0000 Subject: [PATCH 68/78] arm64: mm: use fixmap when creating page tables As a preparatory step to allow us to allocate early page tables from unmapped memory using memblock_alloc, modify the __create_mapping callees to map and unmap the tables they modify using fixmap entries. All but the top-level pgd initialisation is performed via the fixmap. Subsequent patches will inject the pgd physical address, and migrate to using the FIX_PGD slot. 
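The per-level pattern this introduces (taken from the alloc_init_pte() rework in the diff below) is map, update, unmap; the table is only addressable while the fixmap slot points at it:

	pte = pte_set_fixmap_offset(pmd, addr);	/* map the pte table */
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_clear_fixmap();			/* tear the mapping down */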
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit f4710445458c0a1bd1c3c014ada2e7d7dc7b882f) Signed-off-by: Alex Shi --- arch/arm64/mm/mmu.c | 61 ++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e1240cfe5dcf..36203c27d70c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -63,19 +63,30 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, } EXPORT_SYMBOL(phys_mem_access_prot); -static void __init *early_pgtable_alloc(void) +static phys_addr_t __init early_pgtable_alloc(void) { phys_addr_t phys; void *ptr; phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE); BUG_ON(!phys); - ptr = __va(phys); + + /* + * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE + * slot will be free, so we can (ab)use the FIX_PTE slot to initialise + * any level of table. + */ + ptr = pte_set_fixmap(phys); + memset(ptr, 0, PAGE_SIZE); - /* Ensure the zeroed page is visible to the page table walker */ - dsb(ishst); - return ptr; + /* + * Implicit barriers also ensure the zeroed page is visible to the page + * table walker + */ + pte_clear_fixmap(); + + return phys; } /* @@ -99,24 +110,28 @@ static void split_pmd(pmd_t *pmd, pte_t *pte) static void alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot, - void *(*pgtable_alloc)(void)) + phys_addr_t (*pgtable_alloc)(void)) { pte_t *pte; if (pmd_none(*pmd) || pmd_sect(*pmd)) { - pte = pgtable_alloc(); + phys_addr_t pte_phys = pgtable_alloc(); + pte = pte_set_fixmap(pte_phys); if (pmd_sect(*pmd)) split_pmd(pmd, pte); - __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); + __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE); flush_tlb_all(); + pte_clear_fixmap(); } BUG_ON(pmd_bad(*pmd)); - pte = pte_offset_kernel(pmd, addr); + pte = pte_set_fixmap_offset(pmd, addr); do { set_pte(pte, pfn_pte(pfn, prot)); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); + + pte_clear_fixmap(); } static void split_pud(pud_t *old_pud, pmd_t *pmd) @@ -134,7 +149,7 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd) static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - void *(*pgtable_alloc)(void)) + phys_addr_t (*pgtable_alloc)(void)) { pmd_t *pmd; unsigned long next; @@ -143,7 +158,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, * Check for initial section mappings in the pgd/pud and remove them. 
*/ if (pud_none(*pud) || pud_sect(*pud)) { - pmd = pgtable_alloc(); + phys_addr_t pmd_phys = pgtable_alloc(); + pmd = pmd_set_fixmap(pmd_phys); if (pud_sect(*pud)) { /* * need to have the 1G of mappings continue to be @@ -151,12 +167,13 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, */ split_pud(pud, pmd); } - pud_populate(mm, pud, pmd); + __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE); flush_tlb_all(); + pmd_clear_fixmap(); } BUG_ON(pud_bad(*pud)); - pmd = pmd_offset(pud, addr); + pmd = pmd_set_fixmap_offset(pud, addr); do { next = pmd_addr_end(addr, end); /* try section mapping first */ @@ -182,6 +199,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, } phys += next - addr; } while (pmd++, addr = next, addr != end); + + pmd_clear_fixmap(); } static inline bool use_1G_block(unsigned long addr, unsigned long next, @@ -199,18 +218,18 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, - void *(*pgtable_alloc)(void)) + phys_addr_t (*pgtable_alloc)(void)) { pud_t *pud; unsigned long next; if (pgd_none(*pgd)) { - pud = pgtable_alloc(); - pgd_populate(mm, pgd, pud); + phys_addr_t pud_phys = pgtable_alloc(); + __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE); } BUG_ON(pgd_bad(*pgd)); - pud = pud_offset(pgd, addr); + pud = pud_set_fixmap_offset(pgd, addr); do { next = pud_addr_end(addr, end); @@ -243,6 +262,8 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, } phys += next - addr; } while (pud++, addr = next, addr != end); + + pud_clear_fixmap(); } /* @@ -252,7 +273,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot, - void *(*pgtable_alloc)(void)) + phys_addr_t (*pgtable_alloc)(void)) { unsigned long addr, length, end, next; @@ -267,14 +288,14 @@ static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, } while (pgd++, addr = next, addr != end); } -static void *late_pgtable_alloc(void) +static phys_addr_t late_pgtable_alloc(void) { void *ptr = (void *)__get_free_page(PGALLOC_GFP); BUG_ON(!ptr); /* Ensure the zeroed page is visible to the page table walker */ dsb(ishst); - return ptr; + return __pa(ptr); } static void __init create_mapping(phys_addr_t phys, unsigned long virt, From ae97b43d8a46b19d1fada3f294e14873b720a559 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:09 +0000 Subject: [PATCH 69/78] arm64: mm: allocate pagetables anywhere Now that create_mapping uses fixmap slots to modify pte, pmd, and pud entries, we can access page tables anywhere in physical memory, regardless of the extent of the linear mapping. Given that, we no longer need to limit memblock allocations during page table creation, and can leave the limit as its default MEMBLOCK_ALLOC_ANYWHERE. We never add memory which will fall outside of the linear map range given phys_offset and MAX_MEMBLOCK_ADDR are configured appropriately, so any tables we create will fall in the linear map of the final tables. 
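Concretely (a sketch based on early_pgtable_alloc() from the previous patch): the allocation can now land anywhere memblock chooses, because the new table is reached through the FIX_PTE slot rather than a linear map VA:

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* no upper limit needed */
	ptr = pte_set_fixmap(phys);			/* always reachable */
	memset(ptr, 0, PAGE_SIZE);
	pte_clear_fixmap();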
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit cdef5f6e9e0e5ee397759b664a9f875ff59ccf01) Signed-off-by: Alex Shi --- arch/arm64/mm/mmu.c | 35 ----------------------------------- 1 file changed, 35 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 36203c27d70c..dc6665f925d0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -376,20 +376,6 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end) static void __init map_mem(void) { struct memblock_region *reg; - phys_addr_t limit; - - /* - * Temporarily limit the memblock range. We need to do this as - * create_mapping requires puds, pmds and ptes to be allocated from - * memory addressable from the initial direct kernel mapping. - * - * The initial direct kernel mapping, located at swapper_pg_dir, gives - * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps, - * memory starting from PHYS_OFFSET (which must be aligned to 2MB as - * per Documentation/arm64/booting.txt). - */ - limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE; - memblock_set_current_limit(limit); /* map all the memory banks */ for_each_memblock(memory, reg) { @@ -399,29 +385,8 @@ static void __init map_mem(void) if (start >= end) break; - if (ARM64_SWAPPER_USES_SECTION_MAPS) { - /* - * For the first memory bank align the start address and - * current memblock limit to prevent create_mapping() from - * allocating pte page tables from unmapped memory. With - * the section maps, if the first block doesn't end on section - * size boundary, create_mapping() will try to allocate a pte - * page, which may be returned from an unmapped area. - * When section maps are not used, the pte page table for the - * current limit is already present in swapper_pg_dir. - */ - if (start < limit) - start = ALIGN(start, SECTION_SIZE); - if (end < limit) { - limit = end & SECTION_MASK; - memblock_set_current_limit(limit); - } - } __map_memblock(start, end); } - - /* Limit no longer required. */ - memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); } static void __init fixup_executable(void) From 1adf036ec9cf6bb3090b945d6485b8a9095ec817 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:10 +0000 Subject: [PATCH 70/78] arm64: mm: allow passing a pgdir to alloc_init_* To allow us to initialise pgdirs which are fixmapped, allow explicitly passing a pgdir rather than an mm. A new __create_pgd_mapping function is added for this, with existing __create_mapping callers migrated to this. The mm argument was previously only used at the top level. Now that it is redundant at all levels, it is removed. To indicate its new found similarity to alloc_init_{pud,pmd,pte}, __create_mapping is renamed to init_pgd. 
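A before/after sketch of a top-level caller (the new __create_pgd_mapping() wrapper appears in the diff below):

	/* before: an mm_struct threaded through every level */
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt, size,
			 prot, alloc);

	/* after: any pgdir, including one currently mapped via FIX_PGD */
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, alloc);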
Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 11509a306bb6ea595878b2d246d2d56b1783e040) Signed-off-by: Alex Shi --- arch/arm64/mm/mmu.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index dc6665f925d0..fd4c95ba1b6d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -146,8 +146,7 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd) } while (pmd++, i++, i < PTRS_PER_PMD); } -static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, - unsigned long addr, unsigned long end, +static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void)) { @@ -215,8 +214,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, return true; } -static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, - unsigned long addr, unsigned long end, +static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void)) { @@ -257,7 +255,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, } } } else { - alloc_init_pmd(mm, pud, addr, next, phys, prot, + alloc_init_pmd(pud, addr, next, phys, prot, pgtable_alloc); } phys += next - addr; @@ -270,8 +268,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, * Create the page directory entries and any necessary page tables for the * mapping specified by 'md'. */ -static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, - phys_addr_t phys, unsigned long virt, +static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void)) { @@ -283,7 +280,7 @@ static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, end = addr + length; do { next = pgd_addr_end(addr, end); - alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc); + alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc); phys += next - addr; } while (pgd++, addr = next, addr != end); } @@ -298,6 +295,14 @@ static phys_addr_t late_pgtable_alloc(void) return __pa(ptr); } +static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*alloc)(void)) +{ + init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc); +} + static void __init create_mapping(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { @@ -306,16 +311,16 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, &phys, virt); return; } - __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt, - size, prot, early_pgtable_alloc); + __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, + early_pgtable_alloc); } void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, - late_pgtable_alloc); + __create_pgd_mapping(mm->pgd, phys, virt, size, prot, + late_pgtable_alloc); } static void create_mapping_late(phys_addr_t phys, unsigned long virt, @@ -327,8 +332,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, return; } - return __create_mapping(&init_mm, pgd_offset_k(virt), - phys, virt, size, prot, 
late_pgtable_alloc); + __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, + late_pgtable_alloc); } #ifdef CONFIG_DEBUG_RODATA From c2b11f6d9c88db3ebe11dd14173773c8a6b4aa2d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:11 +0000 Subject: [PATCH 71/78] arm64: ensure _stext and _etext are page-aligned Currently we have separate ALIGN_DEBUG_RO{,_MIN} directives to align _etext and __init_begin. While we ensure that __init_begin is page-aligned, we do not provide the same guarantee for _etext. This is not problematic currently as the alignment of __init_begin is sufficient to prevent issues when we modify permissions. Subsequent patches will assume page alignment of segments of the kernel we wish to map with different permissions. To ensure this, move _etext after the ALIGN_DEBUG_RO_MIN for the init section. This renders the prior ALIGN_DEBUG_RO irrelevant, and hence it is removed. Likewise, upgrade to ALIGN_DEBUG_RO_MIN(PAGE_SIZE) for _stext. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit fca082bfb543ccaaff864fc0892379ccaa1711cd) Signed-off-by: Alex Shi --- arch/arm64/kernel/vmlinux.lds.S | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e2d63ed9c361..fa2fc078990f 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -105,7 +105,7 @@ SECTIONS _text = .; HEAD_TEXT } - ALIGN_DEBUG_RO + ALIGN_DEBUG_RO_MIN(PAGE_SIZE) .text : { /* Real text segment */ _stext = .; /* Text and read-only data */ __exception_text_start = .; @@ -130,10 +130,9 @@ SECTIONS RO_DATA(PAGE_SIZE) EXCEPTION_TABLE(8) NOTES - ALIGN_DEBUG_RO - _etext = .; /* End of text and rodata section */ ALIGN_DEBUG_RO_MIN(PAGE_SIZE) + _etext = .; /* End of text and rodata section */ __init_begin = .; INIT_TEXT_SECTION(8) From a146f6b94592514d9ee2e11377eb71b804cc941d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 25 Jan 2016 11:45:12 +0000 Subject: [PATCH 72/78] arm64: mm: create new fine-grained mappings at boot At boot we may change the granularity of the tables mapping the kernel (by splitting or making sections). This may happen when we create the linear mapping (in __map_memblock), or at any point we try to apply fine-grained permissions to the kernel (e.g. fixup_executable, mark_rodata_ro, fixup_init). Changing the active page tables in this manner may result in multiple entries for the same address being allocated into TLBs, risking problems such as TLB conflict aborts or issues derived from the amalgamation of TLB entries. Generally, a break-before-make (BBM) approach is necessary to avoid conflicts, but we cannot do this for the kernel tables as it risks unmapping text or data being used to do so. Instead, we can create a new set of tables from scratch in the safety of the existing mappings, and subsequently migrate over to these using the new cpu_replace_ttbr1 helper, which avoids the two sets of tables being active simultaneously. To avoid issues when we later modify permissions of the page tables (e.g. in fixup_init), we must create the page tables at a granularity such that later modification does not result in splitting of tables. This patch applies this strategy, creating a new set of fine-grained page tables from scratch, and safely migrating to them. 
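Schematically, the resulting paging_init() flow (see the diff below) is: build the new tables behind a fixmapped pgd, then swap ttbr1 over so that the two sets of tables are never live at the same time:

	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);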
The existing fixmap and kasan shadow page tables are reused in the new fine-grained tables. Signed-off-by: Mark Rutland Reviewed-by: Catalin Marinas Cc: Andrey Ryabinin Tested-by: Ard Biesheuvel Reviewed-by: Ard Biesheuvel Tested-by: Jeremy Linton Cc: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas (cherry picked from commit 068a17a5805dfbca4bbf03e664ca6b19709cc7a8) Signed-off-by: Alex Shi --- arch/arm64/include/asm/kasan.h | 3 + arch/arm64/mm/kasan_init.c | 15 ++++ arch/arm64/mm/mmu.c | 155 +++++++++++++++++++-------------- 3 files changed, 110 insertions(+), 63 deletions(-) diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index 2774fa384c47..de0d21211c34 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -7,6 +7,7 @@ #include #include +#include /* * KASAN_SHADOW_START: beginning of the kernel virtual addresses. @@ -28,10 +29,12 @@ #define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3))) void kasan_init(void); +void kasan_copy_shadow(pgd_t *pgdir); asmlinkage void kasan_early_init(void); #else static inline void kasan_init(void) { } +static inline void kasan_copy_shadow(pgd_t *pgdir) { } #endif #endif diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index cf038c7d9fa9..a365dfa191d4 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -96,6 +96,21 @@ asmlinkage void __init kasan_early_init(void) kasan_map_early_shadow(); } +/* + * Copy the current shadow region into a new pgdir. + */ +void __init kasan_copy_shadow(pgd_t *pgdir) +{ + pgd_t *pgd, *pgd_new, *pgd_end; + + pgd = pgd_offset_k(KASAN_SHADOW_START); + pgd_end = pgd_offset_k(KASAN_SHADOW_END); + pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START); + do { + set_pgd(pgd_new, *pgd); + } while (pgd++, pgd_new++, pgd != pgd_end); +} + static void __init clear_pgds(unsigned long start, unsigned long end) { diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index fd4c95ba1b6d..9c5b5ddcd088 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -336,49 +337,42 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt, late_pgtable_alloc); } -#ifdef CONFIG_DEBUG_RODATA -static void __init __map_memblock(phys_addr_t start, phys_addr_t end) +static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end) { - /* - * Set up the executable regions using the existing section mappings - * for now. This will get more fine grained later once all memory - * is mapped - */ - unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); - unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); - if (end < kernel_x_start) { - create_mapping(start, __phys_to_virt(start), - end - start, PAGE_KERNEL); - } else if (start >= kernel_x_end) { - create_mapping(start, __phys_to_virt(start), - end - start, PAGE_KERNEL); - } else { - if (start < kernel_x_start) - create_mapping(start, __phys_to_virt(start), - kernel_x_start - start, - PAGE_KERNEL); - create_mapping(kernel_x_start, - __phys_to_virt(kernel_x_start), - kernel_x_end - kernel_x_start, - PAGE_KERNEL_EXEC); - if (kernel_x_end < end) - create_mapping(kernel_x_end, - __phys_to_virt(kernel_x_end), - end - kernel_x_end, - PAGE_KERNEL); + unsigned long kernel_start = __pa(_stext); + unsigned long kernel_end = __pa(_end); + + /* + * The kernel itself is mapped at page granularity. 
Map all other + * memory, making sure we don't overwrite the existing kernel mappings. + */ + + /* No overlap with the kernel. */ + if (end < kernel_start || start >= kernel_end) { + __create_pgd_mapping(pgd, start, __phys_to_virt(start), + end - start, PAGE_KERNEL, + early_pgtable_alloc); + return; } + /* + * This block overlaps the kernel mapping. Map the portion(s) which + * don't overlap. + */ + if (start < kernel_start) + __create_pgd_mapping(pgd, start, + __phys_to_virt(start), + kernel_start - start, PAGE_KERNEL, + early_pgtable_alloc); + if (kernel_end < end) + __create_pgd_mapping(pgd, kernel_end, + __phys_to_virt(kernel_end), + end - kernel_end, PAGE_KERNEL, + early_pgtable_alloc); } -#else -static void __init __map_memblock(phys_addr_t start, phys_addr_t end) -{ - create_mapping(start, __phys_to_virt(start), end - start, - PAGE_KERNEL_EXEC); -} -#endif -static void __init map_mem(void) +static void __init map_mem(pgd_t *pgd) { struct memblock_region *reg; @@ -390,33 +384,10 @@ static void __init map_mem(void) if (start >= end) break; - __map_memblock(start, end); + __map_memblock(pgd, start, end); } } -static void __init fixup_executable(void) -{ -#ifdef CONFIG_DEBUG_RODATA - /* now that we are actually fully mapped, make the start/end more fine grained */ - if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { - unsigned long aligned_start = round_down(__pa(_stext), - SWAPPER_BLOCK_SIZE); - - create_mapping(aligned_start, __phys_to_virt(aligned_start), - __pa(_stext) - aligned_start, - PAGE_KERNEL); - } - - if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { - unsigned long aligned_end = round_up(__pa(__init_end), - SWAPPER_BLOCK_SIZE); - create_mapping(__pa(__init_end), (unsigned long)__init_end, - aligned_end - __pa(__init_end), - PAGE_KERNEL); - } -#endif -} - #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void) { @@ -434,14 +405,72 @@ void fixup_init(void) PAGE_KERNEL); } +static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end, + pgprot_t prot) +{ + phys_addr_t pa_start = __pa(va_start); + unsigned long size = va_end - va_start; + + BUG_ON(!PAGE_ALIGNED(pa_start)); + BUG_ON(!PAGE_ALIGNED(size)); + + __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, + early_pgtable_alloc); +} + +/* + * Create fine-grained mappings for the kernel. + */ +static void __init map_kernel(pgd_t *pgd) +{ + + map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC); + map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC); + map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL); + + /* + * The fixmap falls in a separate pgd to the kernel, and doesn't live + * in the carveout for the swapper_pg_dir. We can simply re-use the + * existing dir for the fixmap. + */ + set_pgd(pgd_offset_raw(pgd, FIXADDR_START), *pgd_offset_k(FIXADDR_START)); + + kasan_copy_shadow(pgd); +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps and sets up the zero page. */ void __init paging_init(void) { - map_mem(); - fixup_executable(); + phys_addr_t pgd_phys = early_pgtable_alloc(); + pgd_t *pgd = pgd_set_fixmap(pgd_phys); + + map_kernel(pgd); + map_mem(pgd); + + /* + * We want to reuse the original swapper_pg_dir so we don't have to + * communicate the new address to non-coherent secondaries in + * secondary_entry, and so cpu_switch_mm can generate the address with + * adrp+add rather than a load from some global variable. + * + * To do this we need to go via a temporary pgd. 
+	 */
+	cpu_replace_ttbr1(__va(pgd_phys));
+	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+	cpu_replace_ttbr1(swapper_pg_dir);
+
+	pgd_clear_fixmap();
+	memblock_free(pgd_phys, PAGE_SIZE);
+
+	/*
+	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+	 * allocated with it.
+	 */
+	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+		      SWAPPER_DIR_SIZE - PAGE_SIZE);
 
 	bootmem_init();
 }

From 762f201f0a4445cec9cd8130dde538d946aedee1 Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Tue, 26 Jan 2016 11:10:38 +0000
Subject: [PATCH 73/78] arm64: kernel: implement ACPI parking protocol

The SBBR and ACPI specifications allow ACPI based systems that do not implement PSCI (eg systems with no EL3) to boot through the ACPI parking protocol specification[1].

This patch implements the ACPI parking protocol CPU operations, and adds code that eases parsing the parking protocol data structures to the ARM64 SMP initialization carried out at the same time as CPU enumeration.

To wake-up the CPUs from the parked state, this patch implements a wakeup IPI for ARM64 (ie arch_send_wakeup_ipi_mask()) that mirrors the ARM one, so that a specific IPI is sent for wake-up purpose in order to distinguish it from other IPI sources.

Given the current ACPI MADT parsing API, the patch implements a glue layer that helps passing the MADT GICC data structure from SMP initialization code to the parking protocol implementation, somewhat overriding the CPU operations interfaces. This is to avoid creating a completely transparent DT/ACPI CPU operations layer that would require creating opaque structure handling for CPU data (DT represents a CPU through DT nodes, ACPI through static MADT table entries), which seems overkill given that ACPI on ARM64 mandates only two booting protocols (PSCI and parking protocol), so there is no need for further protocol additions.

Based on the original work by Mark Salter

[1] https://acpica.org/sites/acpica/files/MP%20Startup%20for%20ARM%20platforms.docx

Signed-off-by: Lorenzo Pieralisi
Tested-by: Loc Ho
Cc: Will Deacon
Cc: Hanjun Guo
Cc: Sudeep Holla
Cc: Mark Rutland
Cc: Mark Salter
Cc: Al Stone
[catalin.marinas@arm.com: Added WARN_ONCE(!acpi_parking_protocol_valid() on the IPI]
Signed-off-by: Catalin Marinas
(cherry picked from commit 5e89c55e4ed81d7abb1ce8828db35fa389dc0e90)
Signed-off-by: Alex Shi

Conflicts:
	have probes/ now in arch/arm64/kernel/Makefile
---
 arch/arm64/Kconfig                        |   9 ++
 arch/arm64/include/asm/acpi.h             |  19 ++-
 arch/arm64/include/asm/hardirq.h          |   2 +-
 arch/arm64/include/asm/smp.h              |   9 ++
 arch/arm64/kernel/Makefile                |   2 +
 arch/arm64/kernel/acpi_parking_protocol.c | 153 ++++++++++++++++++++++
 arch/arm64/kernel/cpu_ops.c               |  27 +++-
 arch/arm64/kernel/smp.c                   |  28 ++++
 8 files changed, 243 insertions(+), 6 deletions(-)
 create mode 100644 arch/arm64/kernel/acpi_parking_protocol.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd5cb5074410..2d4a79b9889a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -732,6 +732,15 @@ endmenu
 
 menu "Boot options"
 
+config ARM64_ACPI_PARKING_PROTOCOL
+	bool "Enable support for the ARM64 ACPI parking protocol"
+	depends on ACPI
+	help
+	  Enable support for the ARM64 ACPI parking protocol. If disabled
+	  the kernel will not allow booting through the ARM64 ACPI parking
+	  protocol even if the corresponding data is present in the ACPI
+	  MADT table.
+ config CMDLINE string "Default kernel command string" default "" diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index caafd63b8092..aee323b13802 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -87,9 +87,26 @@ void __init acpi_init_cpus(void); static inline void acpi_init_cpus(void) { } #endif /* CONFIG_ACPI */ +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL +bool acpi_parking_protocol_valid(int cpu); +void __init +acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor); +#else +static inline bool acpi_parking_protocol_valid(int cpu) { return false; } +static inline void +acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor) +{} +#endif + static inline const char *acpi_get_enable_method(int cpu) { - return acpi_psci_present() ? "psci" : NULL; + if (acpi_psci_present()) + return "psci"; + + if (acpi_parking_protocol_valid(cpu)) + return "parking-protocol"; + + return NULL; } #ifdef CONFIG_ACPI_APEI diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index a57601f9d17c..8740297dac77 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -20,7 +20,7 @@ #include #include -#define NR_IPI 5 +#define NR_IPI 6 typedef struct { unsigned int __softirq_pending; diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index d9c3d6a6100a..2013a4dc5124 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -64,6 +64,15 @@ extern void secondary_entry(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL +extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); +#else +static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask) +{ + BUILD_BUG(); +} +#endif + extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 8867ff71a4b7..ee2ffacdaa44 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -41,6 +41,8 @@ arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o arm64-obj-$(CONFIG_ACPI) += acpi.o arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o +arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o +arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c new file mode 100644 index 000000000000..4b1e5a7a98da --- /dev/null +++ b/arch/arm64/kernel/acpi_parking_protocol.c @@ -0,0 +1,153 @@ +/* + * ARM64 ACPI Parking Protocol implementation + * + * Authors: Lorenzo Pieralisi + * Mark Salter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include + +#include + +struct cpu_mailbox_entry { + phys_addr_t mailbox_addr; + u8 version; + u8 gic_cpu_id; +}; + +static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS]; + +void __init acpi_set_mailbox_entry(int cpu, + struct acpi_madt_generic_interrupt *p) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + + cpu_entry->mailbox_addr = p->parked_address; + cpu_entry->version = p->parking_version; + cpu_entry->gic_cpu_id = p->cpu_interface_number; +} + +bool acpi_parking_protocol_valid(int cpu) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + + return cpu_entry->mailbox_addr && cpu_entry->version; +} + +static int acpi_parking_protocol_cpu_init(unsigned int cpu) +{ + pr_debug("%s: ACPI parked addr=%llx\n", __func__, + cpu_mailbox_entries[cpu].mailbox_addr); + + return 0; +} + +static int acpi_parking_protocol_cpu_prepare(unsigned int cpu) +{ + return 0; +} + +struct parking_protocol_mailbox { + __le32 cpu_id; + __le32 reserved; + __le64 entry_point; +}; + +static int acpi_parking_protocol_cpu_boot(unsigned int cpu) +{ + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + struct parking_protocol_mailbox __iomem *mailbox; + __le32 cpu_id; + + /* + * Map mailbox memory with attribute device nGnRE (ie ioremap - + * this deviates from the parking protocol specifications since + * the mailboxes are required to be mapped nGnRnE; the attribute + * discrepancy is harmless insofar as the protocol specification + * is concerned). + * If the mailbox is mistakenly allocated in the linear mapping + * by FW ioremap will fail since the mapping will be prevented + * by the kernel (it clashes with the linear mapping attributes + * specifications). + */ + mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); + if (!mailbox) + return -EIO; + + cpu_id = readl_relaxed(&mailbox->cpu_id); + /* + * Check if firmware has set-up the mailbox entry properly + * before kickstarting the respective cpu. + */ + if (cpu_id != ~0U) { + iounmap(mailbox); + return -ENXIO; + } + + /* + * We write the entry point and cpu id as LE regardless of the + * native endianness of the kernel. Therefore, any boot-loaders + * that read this address need to convert this address to the + * Boot-Loader's endianness before jumping. + */ + writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point); + writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id); + + arch_send_wakeup_ipi_mask(cpumask_of(cpu)); + + iounmap(mailbox); + + return 0; +} + +static void acpi_parking_protocol_cpu_postboot(void) +{ + int cpu = smp_processor_id(); + struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu]; + struct parking_protocol_mailbox __iomem *mailbox; + __le64 entry_point; + + /* + * Map mailbox memory with attribute device nGnRE (ie ioremap - + * this deviates from the parking protocol specifications since + * the mailboxes are required to be mapped nGnRnE; the attribute + * discrepancy is harmless insofar as the protocol specification + * is concerned). + * If the mailbox is mistakenly allocated in the linear mapping + * by FW ioremap will fail since the mapping will be prevented + * by the kernel (it clashes with the linear mapping attributes + * specifications). + */ + mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox)); + if (!mailbox) + return; + + entry_point = readl_relaxed(&mailbox->entry_point); + /* + * Check if firmware has cleared the entry_point as expected + * by the protocol specification. 
+	 */
+	WARN_ON(entry_point);
+
+	iounmap(mailbox);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+	.name		= "parking-protocol",
+	.cpu_init	= acpi_parking_protocol_cpu_init,
+	.cpu_prepare	= acpi_parking_protocol_cpu_prepare,
+	.cpu_boot	= acpi_parking_protocol_cpu_boot,
+	.cpu_postboot	= acpi_parking_protocol_cpu_postboot
+};
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index b6bd7d447768..c7cfb8fe06f9 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -25,19 +25,30 @@
 #include 
 
 extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
 extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
 	&smp_spin_table_ops,
 	&cpu_psci_ops,
 	NULL,
 };
 
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+	&acpi_parking_protocol_ops,
+#endif
+	&cpu_psci_ops,
+	NULL,
+};
+
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-	const struct cpu_operations **ops = supported_cpu_ops;
+	const struct cpu_operations **ops;
+
+	ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
 	while (*ops) {
 		if (!strcmp(name, (*ops)->name))
@@ -75,8 +86,16 @@ static const char *__init cpu_read_enable_method(int cpu)
 		}
 	} else {
 		enable_method = acpi_get_enable_method(cpu);
-		if (!enable_method)
-			pr_err("Unsupported ACPI enable-method\n");
+		if (!enable_method) {
+			/*
+			 * In ACPI systems the boot CPU does not require
+			 * checking the enable method since for some
+			 * boot protocol (ie parking protocol) it need not
+			 * be initialized. Don't warn spuriously.
+			 */
+			if (cpu != 0)
+				pr_err("Unsupported ACPI enable-method\n");
+		}
 	}
 
 	return enable_method;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3df454ff37d5..a84623d91410 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -70,6 +70,7 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
+	IPI_WAKEUP
 };
 
 /*
@@ -442,6 +443,17 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 	/* map the logical cpu id to cpu MPIDR */
 	cpu_logical_map(cpu_count) = hwid;
 
+	/*
+	 * Set-up the ACPI parking protocol cpu entries
+	 * while initializing the cpu_logical_map to
+	 * avoid parsing MADT entries multiple times for
+	 * nothing (ie a valid cpu_logical_map entry should
+	 * contain a valid parking protocol data set to
+	 * initialize the cpu if the parking protocol is
+	 * the only available enable method).
+	 */
+	acpi_set_mailbox_entry(cpu_count, processor);
+
 	cpu_count++;
 }
 
@@ -624,6 +636,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -667,6 +680,13 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -744,6 +764,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 #endif
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+	case IPI_WAKEUP:
+		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+			  cpu);
+		break;
+#endif
+
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
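The kernel side above is only one half of the protocol: firmware leaves each
secondary CPU spinning on its own mailbox until the OS releases it. As a rough
illustration only (not part of this series; firmware_park_cpu(),
wait_for_interrupt() and jump_to() are hypothetical stand-ins for what real
firmware implements per the ACPI parking protocol specification), the
firmware-side counterpart of acpi_parking_protocol_cpu_boot() behaves roughly
like this:

/* Hypothetical sketch of the firmware-side loop for one parked CPU. */
static void firmware_park_cpu(volatile struct parking_protocol_mailbox *mb,
			      u32 my_gic_cpu_id)
{
	mb->cpu_id = cpu_to_le32(~0U);		/* advertise "parked" to the OS */

	for (;;) {
		wait_for_interrupt();		/* woken by the IPI_WAKEUP above */
		/* the OS stores entry_point first, then our GIC CPU id */
		if (le32_to_cpu(mb->cpu_id) == my_gic_cpu_id)
			break;			/* released by the OS */
	}

	jump_to(le64_to_cpu(mb->entry_point));	/* enters secondary_entry */
}

This also shows why acpi_parking_protocol_cpu_boot() writes entry_point before
cpu_id: the cpu_id store is what releases the spinning CPU.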
From a916d13d03d627145048c944130cc595b4642282 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 27 Jan 2016 10:50:19 +0100
Subject: [PATCH 74/78] arm64: allow vmalloc regions to be set with set_memory_*

The range of set_memory_* is currently restricted to the module address
range because of difficulties in breaking down larger block sizes.
vmalloc maps PAGE_SIZE pages so it is safe to use as well. Update the
function ranges and add a comment explaining why the range is restricted
the way it is.

Suggested-by: Laura Abbott
Acked-by: Mark Rutland
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
(cherry picked from commit 95f5c80050ad723163aa80dc8bffd48ef4afc6d5)
Signed-off-by: Alex Shi
---
 arch/arm64/mm/pageattr.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index cf6240741134..0795c3a36d8f 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include <linux/vmalloc.h>
 
 #include 
 #include 
@@ -44,6 +45,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long end = start + size;
 	int ret;
 	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,10 +53,23 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || end >= MODULES_END)
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)
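Since change_memory_common() now accepts any range that sits entirely inside
one VM_ALLOC area, callers can protect vmalloc'ed data, not just module
memory. A minimal sketch of such a caller (illustrative only; protect_table()
is a made-up function, and on arm64 of this vintage the set_memory_*
prototypes live in asm/cacheflush.h):

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

static int protect_table(void)
{
	int nr_pages = 4;
	/* vmalloc memory consists purely of PAGE_SIZE ptes, so no
	 * section splitting is ever needed to change its permissions. */
	void *tbl = vmalloc(nr_pages * PAGE_SIZE);

	if (!tbl)
		return -ENOMEM;

	/* ... populate the table ... */

	/* Now allowed: the range is covered by a single VM_ALLOC area. */
	return set_memory_ro((unsigned long)tbl, nr_pages);
}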
From e1e5e10153f6e6ecbe3e132c87ac55fbd8e892bf Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 5 Feb 2016 16:24:46 -0800
Subject: [PATCH 75/78] arm64: Drop alloc function from create_mapping

create_mapping is only used in fixmap_remap_fdt. All the create_mapping
calls need to happen on existing translation table pages without
additional allocations. Rather than have an alloc function be called
and fail, just set it to NULL and catch its use. Also change the name
to create_mapping_noalloc to better capture what exactly is going on.

Reviewed-by: Ard Biesheuvel
Reviewed-by: Mark Rutland
Signed-off-by: Laura Abbott
Signed-off-by: Catalin Marinas
(cherry picked from commit 132233a759580f5ce9b1bfaac9073e47d03c460d)
Signed-off-by: Alex Shi
---
 arch/arm64/mm/mmu.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9c5b5ddcd088..2ea381ad4263 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -116,7 +116,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	pte_t *pte;
 
 	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-		phys_addr_t pte_phys = pgtable_alloc();
+		phys_addr_t pte_phys;
+		BUG_ON(!pgtable_alloc);
+		pte_phys = pgtable_alloc();
 		pte = pte_set_fixmap(pte_phys);
 		if (pmd_sect(*pmd))
 			split_pmd(pmd, pte);
@@ -158,7 +160,9 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
 	if (pud_none(*pud) || pud_sect(*pud)) {
-		phys_addr_t pmd_phys = pgtable_alloc();
+		phys_addr_t pmd_phys;
+		BUG_ON(!pgtable_alloc);
+		pmd_phys = pgtable_alloc();
 		pmd = pmd_set_fixmap(pmd_phys);
 		if (pud_sect(*pud)) {
 			/*
@@ -223,7 +227,9 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		phys_addr_t pud_phys = pgtable_alloc();
+		phys_addr_t pud_phys;
+		BUG_ON(!pgtable_alloc);
+		pud_phys = pgtable_alloc();
 		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
 	}
 	BUG_ON(pgd_bad(*pgd));
@@ -304,7 +310,12 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -313,7 +324,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 		return;
 	}
 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-			     early_pgtable_alloc);
+			     NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
@@ -670,7 +681,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 	/*
 	 * Make sure that the FDT region can be mapped without the need to
 	 * allocate additional translation table pages, so that it is safe
-	 * to call create_mapping() this early.
+	 * to call create_mapping_noalloc() this early.
 	 *
 	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
 	 * be in the same PMD as the rest of the fixmap.
@@ -686,8 +697,8 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 	dt_virt = (void *)dt_virt_base + offset;
 
 	/* map the first chunk so we can read the size from the header */
-	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-		       SWAPPER_BLOCK_SIZE, prot);
+	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
 	if (fdt_magic(dt_virt) != FDT_MAGIC)
 		return NULL;
@@ -697,7 +708,7 @@
 		return NULL;
 
 	if (offset + size > SWAPPER_BLOCK_SIZE)
-		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
 			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
 
 	memblock_reserve(dt_phys, size);
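The conversion pattern here is worth noting: rather than passing an allocator
that must never fire, create_mapping_noalloc() passes NULL, and the
BUG_ON(!pgtable_alloc) checks added above turn any unexpected allocation into
an immediate, attributable failure instead of a silent unsafe one. A generic
sketch of the idiom (hypothetical names, not kernel code):

typedef unsigned long (*alloc_fn)(void);

/* Shared helper: may only allocate when the caller supplied an allocator. */
static void visit_or_extend(unsigned long *slot, alloc_fn alloc)
{
	if (!*slot) {
		BUG_ON(!alloc);		/* caller promised no allocation */
		*slot = alloc();
	}
	/* ... operate on *slot ... */
}

static void visit_existing_only(unsigned long *slot)
{
	visit_or_extend(slot, NULL);	/* misuse dies loudly, at the site */
}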
From 7d13203b6442d6851b967f3d8a9889b08ddd6e00 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 5 Feb 2016 16:24:47 -0800
Subject: [PATCH 76/78] arm64: Add support for ARCH_SUPPORTS_DEBUG_PAGEALLOC

ARCH_SUPPORTS_DEBUG_PAGEALLOC provides a hook to map and unmap
pages for debugging purposes. This requires memory be mapped
with PAGE_SIZE mappings since breaking down larger mappings at runtime
will lead to TLB conflicts. Check if debug_pagealloc is enabled at
runtime and if so, map everything with PAGE_SIZE pages. Implement the
functions to actually map/unmap the pages at runtime.

Reviewed-by: Ard Biesheuvel
Reviewed-by: Mark Rutland
Tested-by: Mark Rutland
Signed-off-by: Laura Abbott
[catalin.marinas@arm.com: static annotation block_mappings_allowed() and #ifdef]
Signed-off-by: Catalin Marinas
(cherry picked from commit 83863f25e4b8214e994ef8b5647aad614d74b45d)
Signed-off-by: Alex Shi
---
 arch/arm64/Kconfig       |  3 +++
 arch/arm64/mm/mmu.c      | 26 +++++++++++++++++++++--
 arch/arm64/mm/pageattr.c | 46 +++++++++++++++++++++++++++++++---------
 3 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2d4a79b9889a..80450fcba31f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -530,6 +530,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
 	def_bool y if SPARSEMEM
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2ea381ad4263..ee2821cbc48d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -149,6 +149,26 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
 	} while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+	/*
+	 * If debug_page_alloc is enabled we must map the linear map
+	 * using pages. However, other mappings created by
+	 * create_mapping_noalloc must use sections in some cases. Allow
+	 * sections to be used in those cases, where no pgtable_alloc
+	 * function is provided.
+	 */
+	return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+	return true;
+}
+#endif
+
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
 				  phys_addr_t (*pgtable_alloc)(void))
@@ -181,7 +201,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		      block_mappings_allowed(pgtable_alloc)) {
 			pmd_t old_pmd =*pmd;
 			set_pmd(pmd, __pmd(phys |
 					   pgprot_val(mk_sect_prot(prot))));
@@ -241,7 +262,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys)) {
+		if (use_1G_block(addr, next, phys) &&
+		    block_mappings_allowed(pgtable_alloc)) {
 			pud_t old_pud = *pud;
 			set_pud(pud, __pud(phys |
 					   pgprot_val(mk_sect_prot(prot))));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0795c3a36d8f..ca6d268e3313 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -37,14 +37,31 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 	return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+				pgprot_t set_mask, pgprot_t clear_mask)
+{
+	struct page_change_data data;
+	int ret;
+
+	data.set_mask = set_mask;
+	data.clear_mask = clear_mask;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+					&data);
+
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
 	unsigned long size = PAGE_SIZE*numpages;
 	unsigned long end = start + size;
-	int ret;
-	struct page_change_data data;
 	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -75,14 +92,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (!numpages)
 		return 0;
 
-	data.set_mask = set_mask;
-	data.clear_mask = clear_mask;
-
-	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-				  &data);
-
-	flush_tlb_kernel_range(start, end);
-	return ret;
+	return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -114,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
 					__pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long addr = (unsigned long) page_address(page);
+
+	if (enable)
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(PTE_VALID),
+					__pgprot(0));
+	else
+		__change_memory_common(addr, PAGE_SIZE * numpages,
+					__pgprot(0),
+					__pgprot(PTE_VALID));
+}
+#endif
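With ARCH_SUPPORTS_DEBUG_PAGEALLOC selected and the kernel booted with
debug_pagealloc=on, __kernel_map_pages() clears PTE_VALID on every page freed
to the buddy allocator, so stale accesses fault synchronously instead of
silently reading reused memory. A deliberately buggy sketch of the effect
(illustrative only; demo_use_after_free() is made up):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

static void demo_use_after_free(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	int *p;

	if (!page)
		return;

	p = page_address(page);
	*p = 42;
	__free_pages(page, 0);	/* debug_pagealloc unmaps the page here */

	/*
	 * Without debug_pagealloc this read may return whatever the page
	 * gets reused for; with it, PTE_VALID is clear and the access
	 * faults right here, pointing straight at the bug.
	 */
	pr_info("stale read: %d\n", *p);
}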
From 602decea81c1d7e89cb633acb0e34fd74cd8a122 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Fri, 5 Feb 2016 16:24:48 -0800
Subject: [PATCH 77/78] arm64: ptdump: Indicate whether memory should be faulting

With CONFIG_DEBUG_PAGEALLOC, pages do not have the valid bit set when
free in the buddy allocator. Add an indication to the page table
dumping code that the valid bit is not set, 'F' for fault, to make
this easier to understand.

Reviewed-by: Ard Biesheuvel
Reviewed-by: Mark Rutland
Tested-by: Mark Rutland
Signed-off-by: Laura Abbott
Signed-off-by: Catalin Marinas
(cherry picked from commit d7e9d59494a9a5d83274f5af2148b82ca22dff3f)
Signed-off-by: Alex Shi
---
 arch/arm64/mm/dump.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 5a22a119a74c..f381ac991de7 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -90,6 +90,11 @@ struct prot_bits {
 
 static const struct prot_bits pte_bits[] = {
 	{
+		.mask	= PTE_VALID,
+		.val	= PTE_VALID,
+		.set	= " ",
+		.clear	= "F",
+	}, {
 		.mask	= PTE_USER,
 		.val	= PTE_USER,
 		.set	= "USR",

From d3ee568c134543eaaebeb3b559cfca11709dad01 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Mon, 14 Nov 2016 21:52:47 +0800
Subject: [PATCH 78/78] arm64/kvm: fix build issue on kvm debug
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add the header file asm/debug-monitors.h into debug-sr.c to fix the
following build error:

arch/arm64/kvm/hyp/debug-sr.c: In function ‘__debug_cond_save_host_state’:
arch/arm64/kvm/hyp/debug-sr.c:118:45: error: ‘DBG_MDSCR_KDE’ undeclared (first use in this function)
   if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
                                              ^~~~~~~~~~~~~
arch/arm64/kvm/hyp/debug-sr.c:118:45: note: each undeclared identifier is reported only once for each function it appears in
arch/arm64/kvm/hyp/debug-sr.c:119:45: error: ‘DBG_MDSCR_MDE’ undeclared (first use in this function)
       (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
                                              ^~~~~~~~~~~~~

Signed-off-by: Alex Shi
---
 arch/arm64/kvm/hyp/debug-sr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index c9c1e97501a9..2f8bca8af295 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -18,6 +18,7 @@
 #include 
 
+#include <asm/debug-monitors.h>
 #include 
 #include 
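For context, the two statements quoted in the error log are the MDSCR_EL1
checks in __debug_cond_save_host_state(); DBG_MDSCR_KDE and DBG_MDSCR_MDE are
defined in asm/debug-monitors.h, which is why the added include fixes the
build. Roughly, the code at those lines reads as below (reconstructed from the
error output; the debug_flags update is an assumption based on the upstream
function, not quoted from this series):

	/* MDSCR_EL1.KDE/MDE mean host debug is live and must be saved. */
	if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
	    (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;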