Merge branch v4.4/topic/hibernate into linux-linaro-lsk-v4.4
This commit is contained in:
commit
62c3330b7f
71 changed files with 3120 additions and 2087 deletions
|
@ -38,6 +38,7 @@ config ARM
|
|||
select HAVE_ARCH_HARDENED_USERCOPY
|
||||
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARM_SMCCC if CPU_V7
|
||||
select HAVE_BPF_JIT
|
||||
select HAVE_CC_STACKPROTECTOR
|
||||
select HAVE_CONTEXT_TRACKING
|
||||
|
@ -1423,8 +1424,7 @@ config BIG_LITTLE
|
|||
|
||||
config BL_SWITCHER
|
||||
bool "big.LITTLE switcher support"
|
||||
depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
|
||||
select ARM_CPU_SUSPEND
|
||||
depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
|
||||
select CPU_PM
|
||||
help
|
||||
The big.LITTLE "switcher" provides the core functionality to
|
||||
|
@ -1482,7 +1482,7 @@ config HOTPLUG_CPU
|
|||
|
||||
config ARM_PSCI
|
||||
bool "Support for the ARM Power State Coordination Interface (PSCI)"
|
||||
depends on CPU_V7
|
||||
depends on HAVE_ARM_SMCCC
|
||||
select ARM_PSCI_FW
|
||||
help
|
||||
Say Y here if you want Linux to communicate with system firmware
|
||||
|
@ -2141,7 +2141,8 @@ config ARCH_SUSPEND_POSSIBLE
|
|||
def_bool y
|
||||
|
||||
config ARM_CPU_SUSPEND
|
||||
def_bool PM_SLEEP
|
||||
def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
|
||||
depends on ARCH_SUSPEND_POSSIBLE
|
||||
|
||||
config ARCH_HIBERNATION_POSSIBLE
|
||||
bool
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#ifndef __ARM_KVM_ARM_H__
|
||||
#define __ARM_KVM_ARM_H__
|
||||
|
||||
#include <linux/const.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* Hyp Configuration Register (HCR) bits */
|
||||
|
@ -132,10 +133,9 @@
|
|||
* space.
|
||||
*/
|
||||
#define KVM_PHYS_SHIFT (40)
|
||||
#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT)
|
||||
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL)
|
||||
#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
|
||||
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
|
||||
#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
|
||||
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
|
||||
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
|
||||
|
||||
/* Virtualization Translation Control Register (VTCR) bits */
|
||||
#define VTCR_SH0 (3 << 12)
|
||||
|
@ -162,17 +162,17 @@
|
|||
#define VTTBR_X (5 - KVM_T0SZ)
|
||||
#endif
|
||||
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
|
||||
#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
||||
#define VTTBR_VMID_SHIFT (48LLU)
|
||||
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
|
||||
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
||||
#define VTTBR_VMID_SHIFT _AC(48, ULL)
|
||||
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
|
||||
|
||||
/* Hyp Syndrome Register (HSR) bits */
|
||||
#define HSR_EC_SHIFT (26)
|
||||
#define HSR_EC (0x3fU << HSR_EC_SHIFT)
|
||||
#define HSR_IL (1U << 25)
|
||||
#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
|
||||
#define HSR_IL (_AC(1, UL) << 25)
|
||||
#define HSR_ISS (HSR_IL - 1)
|
||||
#define HSR_ISV_SHIFT (24)
|
||||
#define HSR_ISV (1U << HSR_ISV_SHIFT)
|
||||
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
|
||||
#define HSR_SRT_SHIFT (16)
|
||||
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
|
||||
#define HSR_FSC (0x3f)
|
||||
|
@ -180,9 +180,9 @@
|
|||
#define HSR_SSE (1 << 21)
|
||||
#define HSR_WNR (1 << 6)
|
||||
#define HSR_CV_SHIFT (24)
|
||||
#define HSR_CV (1U << HSR_CV_SHIFT)
|
||||
#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
|
||||
#define HSR_COND_SHIFT (20)
|
||||
#define HSR_COND (0xfU << HSR_COND_SHIFT)
|
||||
#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
|
||||
|
||||
#define FSC_FAULT (0x04)
|
||||
#define FSC_ACCESS (0x08)
|
||||
|
@ -210,13 +210,13 @@
|
|||
#define HSR_EC_DABT (0x24)
|
||||
#define HSR_EC_DABT_HYP (0x25)
|
||||
|
||||
#define HSR_WFI_IS_WFE (1U << 0)
|
||||
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
|
||||
|
||||
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
|
||||
#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
|
||||
|
||||
#define HSR_DABT_S1PTW (1U << 7)
|
||||
#define HSR_DABT_CM (1U << 8)
|
||||
#define HSR_DABT_EA (1U << 9)
|
||||
#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
|
||||
#define HSR_DABT_CM (_AC(1, UL) << 8)
|
||||
#define HSR_DABT_EA (_AC(1, UL) << 9)
|
||||
|
||||
#define kvm_arm_exception_type \
|
||||
{0, "RESET" }, \
|
||||
|
|
|
@ -214,6 +214,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
|||
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
|
||||
}
|
||||
|
||||
static inline void __cpu_init_stage2(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
|
||||
{
|
||||
return 0;
|
||||
|
|
|
@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
|
|||
pgd_t *merged_hyp_pgd,
|
||||
unsigned long hyp_idmap_start) { }
|
||||
|
||||
static inline unsigned int kvm_get_vmid_bits(void)
|
||||
{
|
||||
return 8;
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* __ARM_KVM_MMU_H__ */
|
||||
|
|
|
@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void)
|
|||
{
|
||||
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
|
||||
}
|
||||
|
||||
static inline bool is_kernel_in_hyp_mode(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/* The section containing the hypervisor text */
|
||||
extern char __hyp_text_start[];
|
||||
extern char __hyp_text_end[];
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -88,8 +88,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
|
|||
|
||||
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
|
||||
ifeq ($(CONFIG_ARM_PSCI),y)
|
||||
obj-y += psci-call.o
|
||||
obj-$(CONFIG_SMP) += psci_smp.o
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o
|
||||
|
||||
extra-y := $(head-y) vmlinux.lds
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include <linux/syscalls.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
|
||||
#include <asm/checksum.h>
|
||||
#include <asm/ftrace.h>
|
||||
|
@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
|
|||
EXPORT_SYMBOL(__pv_phys_pfn_offset);
|
||||
EXPORT_SYMBOL(__pv_offset);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_ARM_SMCCC
|
||||
EXPORT_SYMBOL(arm_smccc_smc);
|
||||
EXPORT_SYMBOL(arm_smccc_hvc);
|
||||
#endif
|
||||
|
|
|
@ -1,31 +0,0 @@
|
|||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Copyright (C) 2015 ARM Limited
|
||||
*
|
||||
* Author: Mark Rutland <mark.rutland@arm.com>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/opcodes-sec.h>
|
||||
#include <asm/opcodes-virt.h>
|
||||
|
||||
/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
|
||||
ENTRY(__invoke_psci_fn_hvc)
|
||||
__HVC(0)
|
||||
bx lr
|
||||
ENDPROC(__invoke_psci_fn_hvc)
|
||||
|
||||
/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
|
||||
ENTRY(__invoke_psci_fn_smc)
|
||||
__SMC(0)
|
||||
bx lr
|
||||
ENDPROC(__invoke_psci_fn_smc)
|
62
arch/arm/kernel/smccc-call.S
Normal file
62
arch/arm/kernel/smccc-call.S
Normal file
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This software is licensed under the terms of the GNU General Public
|
||||
* License version 2, as published by the Free Software Foundation, and
|
||||
* may be copied, distributed, and modified under those terms.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/opcodes-sec.h>
|
||||
#include <asm/opcodes-virt.h>
|
||||
#include <asm/unwind.h>
|
||||
|
||||
/*
|
||||
* Wrap c macros in asm macros to delay expansion until after the
|
||||
* SMCCC asm macro is expanded.
|
||||
*/
|
||||
.macro SMCCC_SMC
|
||||
__SMC(0)
|
||||
.endm
|
||||
|
||||
.macro SMCCC_HVC
|
||||
__HVC(0)
|
||||
.endm
|
||||
|
||||
.macro SMCCC instr
|
||||
UNWIND( .fnstart)
|
||||
mov r12, sp
|
||||
push {r4-r7}
|
||||
UNWIND( .save {r4-r7})
|
||||
ldm r12, {r4-r7}
|
||||
\instr
|
||||
pop {r4-r7}
|
||||
ldr r12, [sp, #(4 * 4)]
|
||||
stm r12, {r0-r3}
|
||||
bx lr
|
||||
UNWIND( .fnend)
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
* unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
|
||||
*/
|
||||
ENTRY(arm_smccc_smc)
|
||||
SMCCC SMCCC_SMC
|
||||
ENDPROC(arm_smccc_smc)
|
||||
|
||||
/*
|
||||
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
* unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
|
||||
*/
|
||||
ENTRY(arm_smccc_hvc)
|
||||
SMCCC SMCCC_HVC
|
||||
ENDPROC(arm_smccc_hvc)
|
|
@ -44,6 +44,7 @@
|
|||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_psci.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#ifdef REQUIRES_VIRT
|
||||
__asm__(".arch_extension virt");
|
||||
|
@ -58,9 +59,12 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
|
|||
|
||||
/* The VMID used in the VTTBR */
|
||||
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
|
||||
static u8 kvm_next_vmid;
|
||||
static u32 kvm_next_vmid;
|
||||
static unsigned int kvm_vmid_bits __read_mostly;
|
||||
static DEFINE_SPINLOCK(kvm_vmid_lock);
|
||||
|
||||
static bool vgic_present;
|
||||
|
||||
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
BUG_ON(preemptible());
|
||||
|
@ -132,7 +136,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
kvm->arch.vmid_gen = 0;
|
||||
|
||||
/* The maximum number of VCPUs is limited by the host's GIC model */
|
||||
kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
|
||||
kvm->arch.max_vcpus = vgic_present ?
|
||||
kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
|
||||
|
||||
return ret;
|
||||
out_free_stage2_pgd:
|
||||
|
@ -170,6 +175,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
int r;
|
||||
switch (ext) {
|
||||
case KVM_CAP_IRQCHIP:
|
||||
r = vgic_present;
|
||||
break;
|
||||
case KVM_CAP_IOEVENTFD:
|
||||
case KVM_CAP_DEVICE_CTRL:
|
||||
case KVM_CAP_USER_MEMORY:
|
||||
|
@ -431,11 +438,12 @@ static void update_vttbr(struct kvm *kvm)
|
|||
kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
|
||||
kvm->arch.vmid = kvm_next_vmid;
|
||||
kvm_next_vmid++;
|
||||
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
|
||||
|
||||
/* update vttbr to be used with the new vmid */
|
||||
pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
|
||||
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
|
||||
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
|
||||
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
|
||||
kvm->arch.vttbr = pgd_phys | vmid;
|
||||
|
||||
spin_unlock(&kvm_vmid_lock);
|
||||
|
@ -911,6 +919,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
|
|||
|
||||
switch (dev_id) {
|
||||
case KVM_ARM_DEVICE_VGIC_V2:
|
||||
if (!vgic_present)
|
||||
return -ENXIO;
|
||||
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
|
||||
default:
|
||||
return -ENODEV;
|
||||
|
@ -925,6 +935,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
|
|||
|
||||
switch (ioctl) {
|
||||
case KVM_CREATE_IRQCHIP: {
|
||||
if (!vgic_present)
|
||||
return -ENXIO;
|
||||
return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
|
||||
}
|
||||
case KVM_ARM_SET_DEVICE_ADDR: {
|
||||
|
@ -970,6 +982,7 @@ static void cpu_init_hyp_mode(void *dummy)
|
|||
vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
|
||||
|
||||
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
|
||||
__cpu_init_stage2();
|
||||
|
||||
kvm_arm_init_debug();
|
||||
}
|
||||
|
@ -1066,6 +1079,12 @@ static int init_hyp_mode(void)
|
|||
goto out_free_mappings;
|
||||
}
|
||||
|
||||
err = create_hyp_mappings(__start_rodata, __end_rodata);
|
||||
if (err) {
|
||||
kvm_err("Cannot map rodata section\n");
|
||||
goto out_free_mappings;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map the Hyp stack pages
|
||||
*/
|
||||
|
@ -1110,8 +1129,17 @@ static int init_hyp_mode(void)
|
|||
* Init HYP view of VGIC
|
||||
*/
|
||||
err = kvm_vgic_hyp_init();
|
||||
if (err)
|
||||
switch (err) {
|
||||
case 0:
|
||||
vgic_present = true;
|
||||
break;
|
||||
case -ENODEV:
|
||||
case -ENXIO:
|
||||
vgic_present = false;
|
||||
break;
|
||||
default:
|
||||
goto out_free_context;
|
||||
}
|
||||
|
||||
/*
|
||||
* Init HYP architected timer support
|
||||
|
@ -1126,6 +1154,10 @@ static int init_hyp_mode(void)
|
|||
|
||||
kvm_perf_init();
|
||||
|
||||
/* set size of VMID supported by CPU */
|
||||
kvm_vmid_bits = kvm_get_vmid_bits();
|
||||
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
|
||||
|
||||
kvm_info("Hyp mode initialized successfully\n");
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
|
|||
return vbar;
|
||||
}
|
||||
|
||||
/*
|
||||
* Switch to an exception mode, updating both CPSR and SPSR. Follow
|
||||
* the logic described in AArch32.EnterMode() from the ARMv8 ARM.
|
||||
*/
|
||||
static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
|
||||
{
|
||||
unsigned long cpsr = *vcpu_cpsr(vcpu);
|
||||
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
|
||||
|
||||
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
|
||||
|
||||
switch (mode) {
|
||||
case FIQ_MODE:
|
||||
*vcpu_cpsr(vcpu) |= PSR_F_BIT;
|
||||
/* Fall through */
|
||||
case ABT_MODE:
|
||||
case IRQ_MODE:
|
||||
*vcpu_cpsr(vcpu) |= PSR_A_BIT;
|
||||
/* Fall through */
|
||||
default:
|
||||
*vcpu_cpsr(vcpu) |= PSR_I_BIT;
|
||||
}
|
||||
|
||||
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
|
||||
|
||||
if (sctlr & SCTLR_TE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
||||
if (sctlr & SCTLR_EE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
||||
|
||||
/* Note: These now point to the mode banked copies */
|
||||
*vcpu_spsr(vcpu) = cpsr;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_inject_undefined - inject an undefined exception into the guest
|
||||
* @vcpu: The VCPU to receive the undefined exception
|
||||
|
@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long new_lr_value;
|
||||
unsigned long new_spsr_value;
|
||||
unsigned long cpsr = *vcpu_cpsr(vcpu);
|
||||
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
|
||||
bool is_thumb = (cpsr & PSR_T_BIT);
|
||||
u32 vect_offset = 4;
|
||||
u32 return_offset = (is_thumb) ? 2 : 4;
|
||||
|
||||
new_spsr_value = cpsr;
|
||||
new_lr_value = *vcpu_pc(vcpu) - return_offset;
|
||||
|
||||
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
|
||||
*vcpu_cpsr(vcpu) |= PSR_I_BIT;
|
||||
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
|
||||
|
||||
if (sctlr & SCTLR_TE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
||||
if (sctlr & SCTLR_EE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
||||
|
||||
/* Note: These now point to UND banked copies */
|
||||
*vcpu_spsr(vcpu) = cpsr;
|
||||
*vcpu_reg(vcpu, 14) = new_lr_value;
|
||||
kvm_update_psr(vcpu, UND_MODE);
|
||||
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
|
||||
|
||||
/* Branch to exception vector */
|
||||
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
|
||||
|
@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
|
||||
{
|
||||
unsigned long new_lr_value;
|
||||
unsigned long new_spsr_value;
|
||||
unsigned long cpsr = *vcpu_cpsr(vcpu);
|
||||
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
|
||||
bool is_thumb = (cpsr & PSR_T_BIT);
|
||||
u32 vect_offset;
|
||||
u32 return_offset = (is_thumb) ? 4 : 0;
|
||||
bool is_lpae;
|
||||
|
||||
new_spsr_value = cpsr;
|
||||
new_lr_value = *vcpu_pc(vcpu) + return_offset;
|
||||
|
||||
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
|
||||
*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
|
||||
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
|
||||
|
||||
if (sctlr & SCTLR_TE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
||||
if (sctlr & SCTLR_EE)
|
||||
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
||||
|
||||
/* Note: These now point to ABT banked copies */
|
||||
*vcpu_spsr(vcpu) = cpsr;
|
||||
*vcpu_reg(vcpu, 14) = new_lr_value;
|
||||
kvm_update_psr(vcpu, ABT_MODE);
|
||||
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
|
||||
|
||||
if (is_pabt)
|
||||
vect_offset = 12;
|
||||
|
|
|
@ -656,9 +656,9 @@ static void *kvm_alloc_hwpgd(void)
|
|||
* kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
|
||||
* @kvm: The KVM struct pointer for the VM.
|
||||
*
|
||||
* Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
|
||||
* support either full 40-bit input addresses or limited to 32-bit input
|
||||
* addresses). Clears the allocated pages.
|
||||
* Allocates only the stage-2 HW PGD level table(s) (can support either full
|
||||
* 40-bit input addresses or limited to 32-bit input addresses). Clears the
|
||||
* allocated pages.
|
||||
*
|
||||
* Note we don't need locking here as this is only called when the VM is
|
||||
* created, which can only be done once.
|
||||
|
|
|
@ -99,6 +99,7 @@ config ARM64
|
|||
select SPARSE_IRQ
|
||||
select SYSCTL_EXCEPTION_TRACE
|
||||
select HAVE_CONTEXT_TRACKING
|
||||
select HAVE_ARM_SMCCC
|
||||
help
|
||||
ARM 64-bit (AArch64) Linux support.
|
||||
|
||||
|
@ -901,6 +902,14 @@ menu "Power management options"
|
|||
|
||||
source "kernel/power/Kconfig"
|
||||
|
||||
config ARCH_HIBERNATION_POSSIBLE
|
||||
def_bool y
|
||||
depends on CPU_PM
|
||||
|
||||
config ARCH_HIBERNATION_HEADER
|
||||
def_bool y
|
||||
depends on HIBERNATION
|
||||
|
||||
config ARCH_SUSPEND_POSSIBLE
|
||||
def_bool y
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Based on arch/arm/include/asm/assembler.h
|
||||
* Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
|
||||
*
|
||||
* Copyright (C) 1996-2000 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
|
@ -23,6 +23,9 @@
|
|||
#ifndef __ASM_ASSEMBLER_H
|
||||
#define __ASM_ASSEMBLER_H
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
|
@ -211,6 +214,102 @@ lr .req x30 // link register
|
|||
add \reg, \reg, \tmp
|
||||
.endm
|
||||
|
||||
/*
|
||||
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
|
||||
*/
|
||||
.macro vma_vm_mm, rd, rn
|
||||
ldr \rd, [\rn, #VMA_VM_MM]
|
||||
.endm
|
||||
|
||||
/*
|
||||
* mmid - get context id from mm pointer (mm->context.id)
|
||||
*/
|
||||
.macro mmid, rd, rn
|
||||
ldr \rd, [\rn, #MM_CONTEXT_ID]
|
||||
.endm
|
||||
|
||||
/*
|
||||
* dcache_line_size - get the minimum D-cache line size from the CTR register.
|
||||
*/
|
||||
.macro dcache_line_size, reg, tmp
|
||||
mrs \tmp, ctr_el0 // read CTR
|
||||
ubfm \tmp, \tmp, #16, #19 // cache line size encoding
|
||||
mov \reg, #4 // bytes per word
|
||||
lsl \reg, \reg, \tmp // actual cache line size
|
||||
.endm
|
||||
|
||||
/*
|
||||
* icache_line_size - get the minimum I-cache line size from the CTR register.
|
||||
*/
|
||||
.macro icache_line_size, reg, tmp
|
||||
mrs \tmp, ctr_el0 // read CTR
|
||||
and \tmp, \tmp, #0xf // cache line size encoding
|
||||
mov \reg, #4 // bytes per word
|
||||
lsl \reg, \reg, \tmp // actual cache line size
|
||||
.endm
|
||||
|
||||
/*
|
||||
* tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
|
||||
*/
|
||||
.macro tcr_set_idmap_t0sz, valreg, tmpreg
|
||||
#ifndef CONFIG_ARM64_VA_BITS_48
|
||||
ldr_l \tmpreg, idmap_t0sz
|
||||
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Macro to perform a data cache maintenance for the interval
|
||||
* [kaddr, kaddr + size)
|
||||
*
|
||||
* op: operation passed to dc instruction
|
||||
* domain: domain used in dsb instruciton
|
||||
* kaddr: starting virtual address of the region
|
||||
* size: size of the region
|
||||
* Corrupts: kaddr, size, tmp1, tmp2
|
||||
*/
|
||||
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
|
||||
dcache_line_size \tmp1, \tmp2
|
||||
add \size, \kaddr, \size
|
||||
sub \tmp2, \tmp1, #1
|
||||
bic \kaddr, \kaddr, \tmp2
|
||||
9998: dc \op, \kaddr
|
||||
add \kaddr, \kaddr, \tmp1
|
||||
cmp \kaddr, \size
|
||||
b.lo 9998b
|
||||
dsb \domain
|
||||
.endm
|
||||
|
||||
/*
|
||||
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
|
||||
*/
|
||||
.macro reset_pmuserenr_el0, tmpreg
|
||||
mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
|
||||
sbfx \tmpreg, \tmpreg, #8, #4
|
||||
cmp \tmpreg, #1 // Skip if no PMU present
|
||||
b.lt 9000f
|
||||
msr pmuserenr_el0, xzr // Disable PMU access from EL0
|
||||
9000:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* copy_page - copy src to dest using temp registers t1-t8
|
||||
*/
|
||||
.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
|
||||
9998: ldp \t1, \t2, [\src]
|
||||
ldp \t3, \t4, [\src, #16]
|
||||
ldp \t5, \t6, [\src, #32]
|
||||
ldp \t7, \t8, [\src, #48]
|
||||
add \src, \src, #64
|
||||
stnp \t1, \t2, [\dest]
|
||||
stnp \t3, \t4, [\dest, #16]
|
||||
stnp \t5, \t6, [\dest, #32]
|
||||
stnp \t7, \t8, [\dest, #48]
|
||||
add \dest, \dest, #64
|
||||
tst \src, #(PAGE_SIZE - 1)
|
||||
b.ne 9998b
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Annotate a function as position independent, i.e., safe to be called before
|
||||
* the kernel virtual mapping is activated.
|
||||
|
|
|
@ -35,8 +35,8 @@
|
|||
#define ARM64_ALT_PAN_NOT_UAO 10
|
||||
|
||||
#define ARM64_WORKAROUND_CAVIUM_27456 11
|
||||
#define ARM64_NCAPS 12
|
||||
|
||||
#define ARM64_HAS_VIRT_HOST_EXTN 12
|
||||
#define ARM64_NCAPS 13
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -83,17 +83,6 @@
|
|||
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
|
||||
|
||||
|
||||
/* Hyp System Control Register (SCTLR_EL2) bits */
|
||||
#define SCTLR_EL2_EE (1 << 25)
|
||||
#define SCTLR_EL2_WXN (1 << 19)
|
||||
#define SCTLR_EL2_I (1 << 12)
|
||||
#define SCTLR_EL2_SA (1 << 3)
|
||||
#define SCTLR_EL2_C (1 << 2)
|
||||
#define SCTLR_EL2_A (1 << 1)
|
||||
#define SCTLR_EL2_M 1
|
||||
#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
|
||||
SCTLR_EL2_SA | SCTLR_EL2_I)
|
||||
|
||||
/* TCR_EL2 Registers bits */
|
||||
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
|
||||
#define TCR_EL2_TBI (1 << 20)
|
||||
|
@ -123,6 +112,7 @@
|
|||
#define VTCR_EL2_SL0_LVL1 (1 << 6)
|
||||
#define VTCR_EL2_T0SZ_MASK 0x3f
|
||||
#define VTCR_EL2_T0SZ_40B 24
|
||||
#define VTCR_EL2_VS 19
|
||||
|
||||
/*
|
||||
* We configure the Stage-2 page tables to always restrict the IPA space to be
|
||||
|
@ -167,7 +157,7 @@
|
|||
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
|
||||
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
|
||||
#define VTTBR_VMID_SHIFT (UL(48))
|
||||
#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
|
||||
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
|
||||
|
||||
/* Hyp System Trap Register */
|
||||
#define HSTR_EL2_T(x) (1 << x)
|
||||
|
|
|
@ -20,82 +20,6 @@
|
|||
|
||||
#include <asm/virt.h>
|
||||
|
||||
/*
|
||||
* 0 is reserved as an invalid value.
|
||||
* Order *must* be kept in sync with the hyp switch code.
|
||||
*/
|
||||
#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
|
||||
#define CSSELR_EL1 2 /* Cache Size Selection Register */
|
||||
#define SCTLR_EL1 3 /* System Control Register */
|
||||
#define ACTLR_EL1 4 /* Auxiliary Control Register */
|
||||
#define CPACR_EL1 5 /* Coprocessor Access Control */
|
||||
#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
|
||||
#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
|
||||
#define TCR_EL1 8 /* Translation Control Register */
|
||||
#define ESR_EL1 9 /* Exception Syndrome Register */
|
||||
#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */
|
||||
#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */
|
||||
#define FAR_EL1 12 /* Fault Address Register */
|
||||
#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
|
||||
#define VBAR_EL1 14 /* Vector Base Address Register */
|
||||
#define CONTEXTIDR_EL1 15 /* Context ID Register */
|
||||
#define TPIDR_EL0 16 /* Thread ID, User R/W */
|
||||
#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
|
||||
#define TPIDR_EL1 18 /* Thread ID, Privileged */
|
||||
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
|
||||
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
|
||||
#define PAR_EL1 21 /* Physical Address Register */
|
||||
#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
|
||||
#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
|
||||
|
||||
/* 32bit specific registers. Keep them at the end of the range */
|
||||
#define DACR32_EL2 24 /* Domain Access Control Register */
|
||||
#define IFSR32_EL2 25 /* Instruction Fault Status Register */
|
||||
#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
|
||||
#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
|
||||
#define NR_SYS_REGS 28
|
||||
|
||||
/* 32bit mapping */
|
||||
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
|
||||
#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
|
||||
#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
|
||||
#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
|
||||
#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
|
||||
#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
|
||||
#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
|
||||
#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
|
||||
#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
|
||||
#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
|
||||
#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
|
||||
#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
|
||||
#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
|
||||
#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
|
||||
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
|
||||
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
|
||||
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
|
||||
#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
|
||||
#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
|
||||
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
|
||||
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
|
||||
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
|
||||
#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
|
||||
#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
|
||||
#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
|
||||
#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
|
||||
#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
|
||||
#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
|
||||
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
|
||||
|
||||
#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
|
||||
#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
|
||||
#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
|
||||
#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
|
||||
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
|
||||
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
|
||||
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
|
||||
|
||||
#define NR_COPRO_REGS (NR_SYS_REGS * 2)
|
||||
|
||||
#define ARM_EXCEPTION_IRQ 0
|
||||
#define ARM_EXCEPTION_TRAP 1
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
|
||||
#include <asm/esr.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/cputype.h>
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/kvm_types.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmio.h>
|
||||
|
||||
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
|
||||
|
@ -85,6 +84,86 @@ struct kvm_vcpu_fault_info {
|
|||
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
|
||||
};
|
||||
|
||||
/*
|
||||
* 0 is reserved as an invalid value.
|
||||
* Order should be kept in sync with the save/restore code.
|
||||
*/
|
||||
enum vcpu_sysreg {
|
||||
__INVALID_SYSREG__,
|
||||
MPIDR_EL1, /* MultiProcessor Affinity Register */
|
||||
CSSELR_EL1, /* Cache Size Selection Register */
|
||||
SCTLR_EL1, /* System Control Register */
|
||||
ACTLR_EL1, /* Auxiliary Control Register */
|
||||
CPACR_EL1, /* Coprocessor Access Control */
|
||||
TTBR0_EL1, /* Translation Table Base Register 0 */
|
||||
TTBR1_EL1, /* Translation Table Base Register 1 */
|
||||
TCR_EL1, /* Translation Control Register */
|
||||
ESR_EL1, /* Exception Syndrome Register */
|
||||
AFSR0_EL1, /* Auxilary Fault Status Register 0 */
|
||||
AFSR1_EL1, /* Auxilary Fault Status Register 1 */
|
||||
FAR_EL1, /* Fault Address Register */
|
||||
MAIR_EL1, /* Memory Attribute Indirection Register */
|
||||
VBAR_EL1, /* Vector Base Address Register */
|
||||
CONTEXTIDR_EL1, /* Context ID Register */
|
||||
TPIDR_EL0, /* Thread ID, User R/W */
|
||||
TPIDRRO_EL0, /* Thread ID, User R/O */
|
||||
TPIDR_EL1, /* Thread ID, Privileged */
|
||||
AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
|
||||
CNTKCTL_EL1, /* Timer Control Register (EL1) */
|
||||
PAR_EL1, /* Physical Address Register */
|
||||
MDSCR_EL1, /* Monitor Debug System Control Register */
|
||||
MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
|
||||
|
||||
/* 32bit specific registers. Keep them at the end of the range */
|
||||
DACR32_EL2, /* Domain Access Control Register */
|
||||
IFSR32_EL2, /* Instruction Fault Status Register */
|
||||
FPEXC32_EL2, /* Floating-Point Exception Control Register */
|
||||
DBGVCR32_EL2, /* Debug Vector Catch Register */
|
||||
|
||||
NR_SYS_REGS /* Nothing after this line! */
|
||||
};
|
||||
|
||||
/* 32bit mapping */
|
||||
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
|
||||
#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
|
||||
#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
|
||||
#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
|
||||
#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
|
||||
#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
|
||||
#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
|
||||
#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
|
||||
#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
|
||||
#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
|
||||
#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
|
||||
#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
|
||||
#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
|
||||
#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
|
||||
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
|
||||
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
|
||||
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
|
||||
#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
|
||||
#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
|
||||
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
|
||||
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
|
||||
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
|
||||
#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
|
||||
#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
|
||||
#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
|
||||
#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
|
||||
#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
|
||||
#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
|
||||
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
|
||||
|
||||
#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
|
||||
#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
|
||||
#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
|
||||
#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
|
||||
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
|
||||
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
|
||||
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
|
||||
|
||||
#define NR_COPRO_REGS (NR_SYS_REGS * 2)
|
||||
|
||||
struct kvm_cpu_context {
|
||||
struct kvm_regs gp_regs;
|
||||
union {
|
||||
|
@ -247,6 +326,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
|||
hyp_stack_ptr, vector_ptr);
|
||||
}
|
||||
|
||||
static inline void __cpu_init_stage2(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kvm_arch_hardware_disable(void) {}
|
||||
static inline void kvm_arch_hardware_unsetup(void) {}
|
||||
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#define __ARM64_KVM_MMIO_H__
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
|
||||
/*
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
|
||||
#include <asm/page.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
/*
|
||||
* As we only have the TTBR0_EL2 register, we cannot express
|
||||
|
@ -158,7 +159,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
|
|||
#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
|
||||
#endif
|
||||
#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
|
||||
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
|
||||
|
||||
#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
|
||||
|
||||
|
@ -302,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
|
|||
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
|
||||
}
|
||||
|
||||
static inline unsigned int kvm_get_vmid_bits(void)
|
||||
{
|
||||
int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
|
||||
|
||||
return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ARM64_KVM_MMU_H__ */
|
||||
|
|
|
@ -71,6 +71,9 @@
|
|||
|
||||
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
|
||||
|
||||
#define KERNEL_START _text
|
||||
#define KERNEL_END _end
|
||||
|
||||
/*
|
||||
* The size of the KASAN shadow region. This should be 1/8th of the
|
||||
* size of the entire kernel virtual address space.
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
#ifndef __ASM_PAGE_H
|
||||
#define __ASM_PAGE_H
|
||||
|
||||
#include <linux/const.h>
|
||||
|
||||
/* PAGE_SHIFT determines the page size */
|
||||
/* CONT_SHIFT determines the number of pages which can be tracked together */
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
#ifndef __ASM_SUSPEND_H
|
||||
#define __ASM_SUSPEND_H
|
||||
|
||||
#define NR_CTX_REGS 11
|
||||
#define NR_CTX_REGS 10
|
||||
#define NR_CALLEE_SAVED_REGS 12
|
||||
|
||||
/*
|
||||
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
|
||||
|
@ -16,11 +17,34 @@ struct cpu_suspend_ctx {
|
|||
u64 sp;
|
||||
} __aligned(16);
|
||||
|
||||
struct sleep_save_sp {
|
||||
phys_addr_t *save_ptr_stash;
|
||||
phys_addr_t save_ptr_stash_phys;
|
||||
/*
|
||||
* Memory to save the cpu state is allocated on the stack by
|
||||
* __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
|
||||
* This data must survive until cpu_resume() is called.
|
||||
*
|
||||
* This struct desribes the size and the layout of the saved cpu state.
|
||||
* The layout of the callee_saved_regs is defined by the implementation
|
||||
* of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
|
||||
* in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
|
||||
* returns, and the data would be subsequently corrupted by the call to the
|
||||
* finisher.
|
||||
*/
|
||||
struct sleep_stack_data {
|
||||
struct cpu_suspend_ctx system_regs;
|
||||
unsigned long callee_saved_regs[NR_CALLEE_SAVED_REGS];
|
||||
};
|
||||
|
||||
extern unsigned long *sleep_save_stash;
|
||||
|
||||
extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
|
||||
extern void cpu_resume(void);
|
||||
int __cpu_suspend_enter(struct sleep_stack_data *state);
|
||||
void __cpu_suspend_exit(void);
|
||||
void _cpu_resume(void);
|
||||
|
||||
int swsusp_arch_suspend(void);
|
||||
int swsusp_arch_resume(void);
|
||||
int arch_hibernation_header_save(void *addr, unsigned int max_size);
|
||||
int arch_hibernation_header_restore(void *addr);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
#ifndef __ASM_SYSREG_H
|
||||
#define __ASM_SYSREG_H
|
||||
|
||||
#include <linux/stringify.h>
|
||||
|
||||
#include <asm/opcodes.h>
|
||||
|
||||
/*
|
||||
|
@ -84,10 +86,21 @@
|
|||
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
|
||||
(!!x)<<8 | 0x1f)
|
||||
|
||||
/* SCTLR_EL1 */
|
||||
#define SCTLR_EL1_CP15BEN (0x1 << 5)
|
||||
#define SCTLR_EL1_SED (0x1 << 8)
|
||||
#define SCTLR_EL1_SPAN (0x1 << 23)
|
||||
/* Common SCTLR_ELx flags. */
|
||||
#define SCTLR_ELx_EE (1 << 25)
|
||||
#define SCTLR_ELx_I (1 << 12)
|
||||
#define SCTLR_ELx_SA (1 << 3)
|
||||
#define SCTLR_ELx_C (1 << 2)
|
||||
#define SCTLR_ELx_A (1 << 1)
|
||||
#define SCTLR_ELx_M 1
|
||||
|
||||
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
|
||||
SCTLR_ELx_SA | SCTLR_ELx_I)
|
||||
|
||||
/* SCTLR_EL1 specific flags. */
|
||||
#define SCTLR_EL1_SPAN (1 << 23)
|
||||
#define SCTLR_EL1_SED (1 << 8)
|
||||
#define SCTLR_EL1_CP15BEN (1 << 5)
|
||||
|
||||
|
||||
/* id_aa64isar0 */
|
||||
|
@ -215,6 +228,8 @@
|
|||
|
||||
#else
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
asm(
|
||||
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
|
||||
" .equ .L__reg_num_x\\num, \\num\n"
|
||||
|
@ -239,6 +254,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
|
|||
val |= set;
|
||||
asm volatile("msr sctlr_el1, %0" : : "r" (val));
|
||||
}
|
||||
|
||||
/*
|
||||
* Unlike read_cpuid, calls to read_sysreg are never expected to be
|
||||
* optimized away or replaced with synthetic values.
|
||||
*/
|
||||
#define read_sysreg(r) ({ \
|
||||
u64 __val; \
|
||||
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define write_sysreg(v, r) do { \
|
||||
u64 __val = (u64)v; \
|
||||
asm volatile("msr " __stringify(r) ", %0" \
|
||||
: : "r" (__val)); \
|
||||
} while (0)
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_SYSREG_H */
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/*
|
||||
* __boot_cpu_mode records what mode CPUs were booted in.
|
||||
* A correctly-implemented bootloader must start all CPUs in the same mode:
|
||||
|
@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void)
|
|||
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
|
||||
}
|
||||
|
||||
static inline bool is_kernel_in_hyp_mode(void)
|
||||
{
|
||||
u64 el;
|
||||
|
||||
asm("mrs %0, CurrentEL" : "=r" (el));
|
||||
return el == CurrentEL_EL2;
|
||||
}
|
||||
|
||||
/* The section containing the hypervisor text */
|
||||
extern char __hyp_text_start[];
|
||||
extern char __hyp_text_end[];
|
||||
|
|
|
@ -14,10 +14,10 @@ CFLAGS_REMOVE_return_address.o = -pg
|
|||
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
|
||||
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
|
||||
sys.o stacktrace.o time.o traps.o io.o vdso.o \
|
||||
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
|
||||
hyp-stub.o psci.o cpu_ops.o insn.o \
|
||||
return_address.o cpuinfo.o cpu_errata.o \
|
||||
cpufeature.o alternative.o cacheinfo.o \
|
||||
smp.o smp_spin_table.o topology.o
|
||||
smp.o smp_spin_table.o topology.o smccc-call.o
|
||||
|
||||
extra-$(CONFIG_EFI) := efi-entry.o
|
||||
|
||||
|
@ -41,8 +41,10 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o
|
|||
arm64-obj-$(CONFIG_PCI) += pci.o
|
||||
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
|
||||
arm64-obj-$(CONFIG_ACPI) += acpi.o
|
||||
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
|
||||
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
|
||||
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
|
||||
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
|
||||
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
|
||||
|
||||
obj-y += $(arm64-obj-y) vdso/ probes/
|
||||
obj-m += $(arm64-obj-m)
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
#include <linux/uaccess.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
|
||||
#include <asm/checksum.h>
|
||||
|
||||
|
@ -70,3 +71,7 @@ EXPORT_SYMBOL(test_and_change_bit);
|
|||
EXPORT_SYMBOL(_mcount);
|
||||
NOKPROBE_SYMBOL(_mcount);
|
||||
#endif
|
||||
|
||||
/* arm-smccc */
|
||||
EXPORT_SYMBOL(arm_smccc_smc);
|
||||
EXPORT_SYMBOL(arm_smccc_hvc);
|
||||
|
|
|
@ -22,12 +22,14 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/suspend.h>
|
||||
#include <asm/vdso_datapage.h>
|
||||
#include <linux/kbuild.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
|
||||
int main(void)
|
||||
{
|
||||
|
@ -120,58 +122,25 @@ int main(void)
|
|||
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
|
||||
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
|
||||
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
|
||||
DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
|
||||
DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
|
||||
DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
|
||||
DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
|
||||
DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
|
||||
DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
|
||||
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
|
||||
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
|
||||
DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
|
||||
DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr));
|
||||
DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr));
|
||||
DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr));
|
||||
DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr));
|
||||
DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr));
|
||||
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
|
||||
DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2));
|
||||
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
|
||||
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
|
||||
DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
|
||||
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
|
||||
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
|
||||
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
|
||||
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
|
||||
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
|
||||
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
|
||||
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
|
||||
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
|
||||
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
|
||||
DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
|
||||
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
|
||||
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
|
||||
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
|
||||
DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
|
||||
DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
|
||||
DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
|
||||
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
|
||||
DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
|
||||
DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
|
||||
DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
|
||||
DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
|
||||
DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
|
||||
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
|
||||
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
|
||||
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_PM
|
||||
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
|
||||
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
|
||||
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
|
||||
DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
|
||||
DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
|
||||
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
|
||||
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
|
||||
DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
|
||||
DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
|
||||
#endif
|
||||
DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
|
||||
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
|
||||
BLANK();
|
||||
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
|
||||
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
|
||||
DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <asm/cpu_ops.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/virt.h>
|
||||
|
||||
unsigned long elf_hwcap __read_mostly;
|
||||
EXPORT_SYMBOL_GPL(elf_hwcap);
|
||||
|
@ -646,6 +647,11 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
|
|||
return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
|
||||
}
|
||||
|
||||
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
|
||||
{
|
||||
return is_kernel_in_hyp_mode();
|
||||
}
|
||||
|
||||
static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
{
|
||||
.desc = "GIC system register CPU interface",
|
||||
|
@ -698,6 +704,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|||
.matches = cpufeature_pan_not_uao,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_PAN */
|
||||
{
|
||||
.desc = "Virtualization Host Extensions",
|
||||
.capability = ARM64_HAS_VIRT_HOST_EXTN,
|
||||
.matches = runs_at_el2,
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
|
|
|
@ -50,9 +50,6 @@
|
|||
#error TEXT_OFFSET must be less than 2MB
|
||||
#endif
|
||||
|
||||
#define KERNEL_START _text
|
||||
#define KERNEL_END _end
|
||||
|
||||
/*
|
||||
* Kernel startup entry point.
|
||||
* ---------------------------
|
||||
|
@ -666,7 +663,7 @@ ENDPROC(__secondary_switched)
|
|||
* If it isn't, park the CPU
|
||||
*/
|
||||
.section ".idmap.text", "ax"
|
||||
__enable_mmu:
|
||||
ENTRY(__enable_mmu)
|
||||
mrs x18, sctlr_el1 // preserve old SCTLR_EL1 value
|
||||
mrs x1, ID_AA64MMFR0_EL1
|
||||
ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
|
||||
|
|
176
arch/arm64/kernel/hibernate-asm.S
Normal file
176
arch/arm64/kernel/hibernate-asm.S
Normal file
|
@ -0,0 +1,176 @@
|
|||
/*
|
||||
* Hibernate low-level support
|
||||
*
|
||||
* Copyright (C) 2016 ARM Ltd.
|
||||
* Author: James Morse <james.morse@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/virt.h>
|
||||
|
||||
/*
|
||||
* To prevent the possibility of old and new partial table walks being visible
|
||||
* in the tlb, switch the ttbr to a zero page when we invalidate the old
|
||||
* records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
|
||||
* Even switching to our copied tables will cause a changed output address at
|
||||
* each stage of the walk.
|
||||
*/
|
||||
.macro break_before_make_ttbr_switch zero_page, page_table
|
||||
msr ttbr1_el1, \zero_page
|
||||
isb
|
||||
tlbi vmalle1is
|
||||
dsb ish
|
||||
msr ttbr1_el1, \page_table
|
||||
isb
|
||||
.endm
|
||||
|
||||
|
||||
/*
|
||||
* Resume from hibernate
|
||||
*
|
||||
* Loads temporary page tables then restores the memory image.
|
||||
* Finally branches to cpu_resume() to restore the state saved by
|
||||
* swsusp_arch_suspend().
|
||||
*
|
||||
* Because this code has to be copied to a 'safe' page, it can't call out to
|
||||
* other functions by PC-relative address. Also remember that it may be
|
||||
* mid-way through over-writing other functions. For this reason it contains
|
||||
* code from flush_icache_range() and uses the copy_page() macro.
|
||||
*
|
||||
* This 'safe' page is mapped via ttbr0, and executed from there. This function
|
||||
* switches to a copy of the linear map in ttbr1, performs the restore, then
|
||||
* switches ttbr1 to the original kernel's swapper_pg_dir.
|
||||
*
|
||||
* All of memory gets written to, including code. We need to clean the kernel
|
||||
* text to the Point of Coherence (PoC) before secondary cores can be booted.
|
||||
* Because the kernel modules and executable pages mapped to user space are
|
||||
* also written as data, we clean all pages we touch to the Point of
|
||||
* Unification (PoU).
|
||||
*
|
||||
* x0: physical address of temporary page tables
|
||||
* x1: physical address of swapper page tables
|
||||
* x2: address of cpu_resume
|
||||
* x3: linear map address of restore_pblist in the current kernel
|
||||
* x4: physical address of __hyp_stub_vectors, or 0
|
||||
* x5: physical address of a zero page that remains zero after resume
|
||||
*/
|
||||
.pushsection ".hibernate_exit.text", "ax"
|
||||
ENTRY(swsusp_arch_suspend_exit)
|
||||
/*
|
||||
* We execute from ttbr0, change ttbr1 to our copied linear map tables
|
||||
* with a break-before-make via the zero page
|
||||
*/
|
||||
break_before_make_ttbr_switch x5, x0
|
||||
|
||||
mov x21, x1
|
||||
mov x30, x2
|
||||
mov x24, x4
|
||||
mov x25, x5
|
||||
|
||||
/* walk the restore_pblist and use copy_page() to over-write memory */
|
||||
mov x19, x3
|
||||
|
||||
1: ldr x10, [x19, #HIBERN_PBE_ORIG]
|
||||
mov x0, x10
|
||||
ldr x1, [x19, #HIBERN_PBE_ADDR]
|
||||
|
||||
copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
|
||||
|
||||
add x1, x10, #PAGE_SIZE
|
||||
/* Clean the copied page to PoU - based on flush_icache_range() */
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
bic x4, x10, x3
|
||||
2: dc cvau, x4 /* clean D line / unified line */
|
||||
add x4, x4, x2
|
||||
cmp x4, x1
|
||||
b.lo 2b
|
||||
|
||||
ldr x19, [x19, #HIBERN_PBE_NEXT]
|
||||
cbnz x19, 1b
|
||||
dsb ish /* wait for PoU cleaning to finish */
|
||||
|
||||
/* switch to the restored kernels page tables */
|
||||
break_before_make_ttbr_switch x25, x21
|
||||
|
||||
ic ialluis
|
||||
dsb ish
|
||||
isb
|
||||
|
||||
cbz x24, 3f /* Do we need to re-initialise EL2? */
|
||||
hvc #0
|
||||
3: ret
|
||||
|
||||
.ltorg
|
||||
ENDPROC(swsusp_arch_suspend_exit)
|
||||
|
||||
/*
|
||||
* Restore the hyp stub.
|
||||
* This must be done before the hibernate page is unmapped by _cpu_resume(),
|
||||
* but happens before any of the hyp-stub's code is cleaned to PoC.
|
||||
*
|
||||
* x24: The physical address of __hyp_stub_vectors
|
||||
*/
|
||||
el1_sync:
|
||||
msr vbar_el2, x24
|
||||
eret
|
||||
ENDPROC(el1_sync)
|
||||
|
||||
.macro invalid_vector label
|
||||
\label:
|
||||
b \label
|
||||
ENDPROC(\label)
|
||||
.endm
|
||||
|
||||
invalid_vector el2_sync_invalid
|
||||
invalid_vector el2_irq_invalid
|
||||
invalid_vector el2_fiq_invalid
|
||||
invalid_vector el2_error_invalid
|
||||
invalid_vector el1_sync_invalid
|
||||
invalid_vector el1_irq_invalid
|
||||
invalid_vector el1_fiq_invalid
|
||||
invalid_vector el1_error_invalid
|
||||
|
||||
/* el2 vectors - switch el2 here while we restore the memory image. */
|
||||
.align 11
|
||||
ENTRY(hibernate_el2_vectors)
|
||||
ventry el2_sync_invalid // Synchronous EL2t
|
||||
ventry el2_irq_invalid // IRQ EL2t
|
||||
ventry el2_fiq_invalid // FIQ EL2t
|
||||
ventry el2_error_invalid // Error EL2t
|
||||
|
||||
ventry el2_sync_invalid // Synchronous EL2h
|
||||
ventry el2_irq_invalid // IRQ EL2h
|
||||
ventry el2_fiq_invalid // FIQ EL2h
|
||||
ventry el2_error_invalid // Error EL2h
|
||||
|
||||
ventry el1_sync // Synchronous 64-bit EL1
|
||||
ventry el1_irq_invalid // IRQ 64-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 64-bit EL1
|
||||
ventry el1_error_invalid // Error 64-bit EL1
|
||||
|
||||
ventry el1_sync_invalid // Synchronous 32-bit EL1
|
||||
ventry el1_irq_invalid // IRQ 32-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 32-bit EL1
|
||||
ventry el1_error_invalid // Error 32-bit EL1
|
||||
END(hibernate_el2_vectors)
|
||||
|
||||
.popsection
|
487
arch/arm64/kernel/hibernate.c
Normal file
487
arch/arm64/kernel/hibernate.c
Normal file
|
@ -0,0 +1,487 @@
|
|||
/*:
|
||||
* Hibernate support specific for ARM64
|
||||
*
|
||||
* Derived from work on ARM hibernation support by:
|
||||
*
|
||||
* Ubuntu project, hibernation support for mach-dove
|
||||
* Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
|
||||
* Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
|
||||
* https://lkml.org/lkml/2010/6/18/4
|
||||
* https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
|
||||
* https://patchwork.kernel.org/patch/96442/
|
||||
*
|
||||
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
|
||||
*
|
||||
* License terms: GNU General Public License (GPL) version 2
|
||||
*/
|
||||
#define pr_fmt(x) "hibernate: " x
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/version.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/suspend.h>
|
||||
#include <asm/virt.h>
|
||||
|
||||
/*
|
||||
* Hibernate core relies on this value being 0 on resume, and marks it
|
||||
* __nosavedata assuming it will keep the resume kernel's '0' value. This
|
||||
* doesn't happen with KASLR.
|
||||
*
|
||||
* defined as "__visible int in_suspend __nosavedata" in
|
||||
* kernel/power/hibernate.c
|
||||
*/
|
||||
extern int in_suspend;
|
||||
|
||||
/* Find a symbol's alias in the linear map */
|
||||
#define LMADDR(x) phys_to_virt(virt_to_phys(x))
|
||||
|
||||
/* Do we need to reset el2? */
|
||||
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
|
||||
|
||||
/*
|
||||
* Start/end of the hibernate exit code; this must be copied to a 'safe'
|
||||
* location in memory, and executed from there.
|
||||
*/
|
||||
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
|
||||
|
||||
/* temporary el2 vectors in the __hibernate_exit_text section. */
|
||||
extern char hibernate_el2_vectors[];
|
||||
|
||||
/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
|
||||
extern char __hyp_stub_vectors[];
|
||||
|
||||
/*
|
||||
* Values that may not change over hibernate/resume. We put the build number
|
||||
* and date in here so that we guarantee not to resume with a different
|
||||
* kernel.
|
||||
*/
|
||||
struct arch_hibernate_hdr_invariants {
|
||||
char uts_version[__NEW_UTS_LEN + 1];
|
||||
};
|
||||
|
||||
/* These values need to be known across a hibernate/restore. */
|
||||
static struct arch_hibernate_hdr {
|
||||
struct arch_hibernate_hdr_invariants invariants;
|
||||
|
||||
/* These are needed to find the relocated kernel if built with kaslr */
|
||||
phys_addr_t ttbr1_el1;
|
||||
void (*reenter_kernel)(void);
|
||||
|
||||
/*
|
||||
* We need to know where the __hyp_stub_vectors are after restore to
|
||||
* re-configure el2.
|
||||
*/
|
||||
phys_addr_t __hyp_stub_vectors;
|
||||
} resume_hdr;
|
||||
|
||||
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
|
||||
{
|
||||
memset(i, 0, sizeof(*i));
|
||||
memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
|
||||
}
|
||||
|
||||
int pfn_is_nosave(unsigned long pfn)
|
||||
{
|
||||
unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
|
||||
unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
|
||||
|
||||
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
|
||||
}
|
||||
|
||||
void notrace save_processor_state(void)
|
||||
{
|
||||
WARN_ON(num_online_cpus() != 1);
|
||||
}
|
||||
|
||||
void notrace restore_processor_state(void)
|
||||
{
|
||||
}
|
||||
|
||||
int arch_hibernation_header_save(void *addr, unsigned int max_size)
|
||||
{
|
||||
struct arch_hibernate_hdr *hdr = addr;
|
||||
|
||||
if (max_size < sizeof(*hdr))
|
||||
return -EOVERFLOW;
|
||||
|
||||
arch_hdr_invariants(&hdr->invariants);
|
||||
hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
|
||||
hdr->reenter_kernel = _cpu_resume;
|
||||
|
||||
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
|
||||
if (el2_reset_needed())
|
||||
hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
|
||||
else
|
||||
hdr->__hyp_stub_vectors = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(arch_hibernation_header_save);
|
||||
|
||||
int arch_hibernation_header_restore(void *addr)
|
||||
{
|
||||
struct arch_hibernate_hdr_invariants invariants;
|
||||
struct arch_hibernate_hdr *hdr = addr;
|
||||
|
||||
arch_hdr_invariants(&invariants);
|
||||
if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
|
||||
pr_crit("Hibernate image not generated by this kernel!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
resume_hdr = *hdr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(arch_hibernation_header_restore);
|
||||
|
||||
/*
|
||||
* Copies length bytes, starting at src_start, into a new page,
|
||||
* performs cache maintenance, then maps it at the specified low
|
||||
* address as executable.
|
||||
*
|
||||
* This is used by hibernate to copy the code it needs to execute when
|
||||
* overwriting the kernel text. This function generates a new set of page
|
||||
* tables, which it loads into ttbr0.
|
||||
*
|
||||
* Length is provided as we probably only want 4K of data, even on a 64K
|
||||
* page system.
|
||||
*/
|
||||
static int create_safe_exec_page(void *src_start, size_t length,
|
||||
unsigned long dst_addr,
|
||||
phys_addr_t *phys_dst_addr,
|
||||
void *(*allocator)(gfp_t mask),
|
||||
gfp_t mask)
|
||||
{
|
||||
int rc = 0;
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
unsigned long dst = (unsigned long)allocator(mask);
|
||||
|
||||
if (!dst) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy((void *)dst, src_start, length);
|
||||
flush_icache_range(dst, dst + length);
|
||||
|
||||
pgd = pgd_offset_raw(allocator(mask), dst_addr);
|
||||
if (pgd_none(*pgd)) {
|
||||
pud = allocator(mask);
|
||||
if (!pud) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
pgd_populate(&init_mm, pgd, pud);
|
||||
}
|
||||
|
||||
pud = pud_offset(pgd, dst_addr);
|
||||
if (pud_none(*pud)) {
|
||||
pmd = allocator(mask);
|
||||
if (!pmd) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
pud_populate(&init_mm, pud, pmd);
|
||||
}
|
||||
|
||||
pmd = pmd_offset(pud, dst_addr);
|
||||
if (pmd_none(*pmd)) {
|
||||
pte = allocator(mask);
|
||||
if (!pte) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
pmd_populate_kernel(&init_mm, pmd, pte);
|
||||
}
|
||||
|
||||
pte = pte_offset_kernel(pmd, dst_addr);
|
||||
set_pte(pte, __pte(virt_to_phys((void *)dst) |
|
||||
pgprot_val(PAGE_KERNEL_EXEC)));
|
||||
|
||||
/* Load our new page tables */
|
||||
asm volatile("msr ttbr0_el1, %0;"
|
||||
"isb;"
|
||||
"tlbi vmalle1is;"
|
||||
"dsb ish;"
|
||||
"isb" : : "r"(virt_to_phys(pgd)));
|
||||
|
||||
*phys_dst_addr = virt_to_phys((void *)dst);
|
||||
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
int swsusp_arch_suspend(void)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
struct sleep_stack_data state;
|
||||
|
||||
local_dbg_save(flags);
|
||||
|
||||
if (__cpu_suspend_enter(&state)) {
|
||||
ret = swsusp_save();
|
||||
} else {
|
||||
/* Clean kernel to PoC for secondary core startup */
|
||||
__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
|
||||
|
||||
/*
|
||||
* Tell the hibernation core that we've just restored
|
||||
* the memory
|
||||
*/
|
||||
in_suspend = 0;
|
||||
|
||||
__cpu_suspend_exit();
|
||||
}
|
||||
|
||||
local_dbg_restore(flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
pte_t *src_pte;
|
||||
pte_t *dst_pte;
|
||||
unsigned long addr = start;
|
||||
|
||||
dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
|
||||
if (!dst_pte)
|
||||
return -ENOMEM;
|
||||
pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
|
||||
dst_pte = pte_offset_kernel(dst_pmd, start);
|
||||
|
||||
src_pte = pte_offset_kernel(src_pmd, start);
|
||||
do {
|
||||
if (!pte_none(*src_pte))
|
||||
/*
|
||||
* Resume will overwrite areas that may be marked
|
||||
* read only (code, rodata). Clear the RDONLY bit from
|
||||
* the temporary mappings we use during restore.
|
||||
*/
|
||||
set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
|
||||
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
pmd_t *src_pmd;
|
||||
pmd_t *dst_pmd;
|
||||
unsigned long next;
|
||||
unsigned long addr = start;
|
||||
|
||||
if (pud_none(*dst_pud)) {
|
||||
dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
|
||||
if (!dst_pmd)
|
||||
return -ENOMEM;
|
||||
pud_populate(&init_mm, dst_pud, dst_pmd);
|
||||
}
|
||||
dst_pmd = pmd_offset(dst_pud, start);
|
||||
|
||||
src_pmd = pmd_offset(src_pud, start);
|
||||
do {
|
||||
next = pmd_addr_end(addr, end);
|
||||
if (pmd_none(*src_pmd))
|
||||
continue;
|
||||
if (pmd_table(*src_pmd)) {
|
||||
if (copy_pte(dst_pmd, src_pmd, addr, next))
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
set_pmd(dst_pmd,
|
||||
__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
|
||||
}
|
||||
} while (dst_pmd++, src_pmd++, addr = next, addr != end);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
pud_t *dst_pud;
|
||||
pud_t *src_pud;
|
||||
unsigned long next;
|
||||
unsigned long addr = start;
|
||||
|
||||
if (pgd_none(*dst_pgd)) {
|
||||
dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
|
||||
if (!dst_pud)
|
||||
return -ENOMEM;
|
||||
pgd_populate(&init_mm, dst_pgd, dst_pud);
|
||||
}
|
||||
dst_pud = pud_offset(dst_pgd, start);
|
||||
|
||||
src_pud = pud_offset(src_pgd, start);
|
||||
do {
|
||||
next = pud_addr_end(addr, end);
|
||||
if (pud_none(*src_pud))
|
||||
continue;
|
||||
if (pud_table(*(src_pud))) {
|
||||
if (copy_pmd(dst_pud, src_pud, addr, next))
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
set_pud(dst_pud,
|
||||
__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
|
||||
}
|
||||
} while (dst_pud++, src_pud++, addr = next, addr != end);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
unsigned long next;
|
||||
unsigned long addr = start;
|
||||
pgd_t *src_pgd = pgd_offset_k(start);
|
||||
|
||||
dst_pgd = pgd_offset_raw(dst_pgd, start);
|
||||
do {
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(*src_pgd))
|
||||
continue;
|
||||
if (copy_pud(dst_pgd, src_pgd, addr, next))
|
||||
return -ENOMEM;
|
||||
} while (dst_pgd++, src_pgd++, addr = next, addr != end);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
|
||||
*
|
||||
* Memory allocated by get_safe_page() will be dealt with by the hibernate code,
|
||||
* we don't need to free it here.
|
||||
*/
|
||||
int swsusp_arch_resume(void)
|
||||
{
|
||||
int rc = 0;
|
||||
void *zero_page;
|
||||
size_t exit_size;
|
||||
pgd_t *tmp_pg_dir;
|
||||
void *lm_restore_pblist;
|
||||
phys_addr_t phys_hibernate_exit;
|
||||
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
|
||||
void *, phys_addr_t, phys_addr_t);
|
||||
|
||||
/*
|
||||
* Locate the exit code in the bottom-but-one page, so that *NULL
|
||||
* still has disastrous effects.
|
||||
*/
|
||||
hibernate_exit = (void *)PAGE_SIZE;
|
||||
exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
|
||||
/*
|
||||
* Copy swsusp_arch_suspend_exit() to a safe page. This will generate
|
||||
* a new set of ttbr0 page tables and load them.
|
||||
*/
|
||||
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
|
||||
(unsigned long)hibernate_exit,
|
||||
&phys_hibernate_exit,
|
||||
(void *)get_safe_page, GFP_ATOMIC);
|
||||
if (rc) {
|
||||
pr_err("Failed to create safe executable page for hibernate_exit code.");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* The hibernate exit text contains a set of el2 vectors, that will
|
||||
* be executed at el2 with the mmu off in order to reload hyp-stub.
|
||||
*/
|
||||
__flush_dcache_area(hibernate_exit, exit_size);
|
||||
|
||||
/*
|
||||
* Restoring the memory image will overwrite the ttbr1 page tables.
|
||||
* Create a second copy of just the linear map, and use this when
|
||||
* restoring.
|
||||
*/
|
||||
tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
|
||||
if (!tmp_pg_dir) {
|
||||
pr_err("Failed to allocate memory for temporary page tables.");
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Since we only copied the linear map, we need to find restore_pblist's
|
||||
* linear map address.
|
||||
*/
|
||||
lm_restore_pblist = LMADDR(restore_pblist);
|
||||
|
||||
/*
|
||||
* KASLR will cause the el2 vectors to be in a different location in
|
||||
* the resumed kernel. Load hibernate's temporary copy into el2.
|
||||
*
|
||||
* We can skip this step if we booted at EL1, or are running with VHE.
|
||||
*/
|
||||
if (el2_reset_needed()) {
|
||||
phys_addr_t el2_vectors = phys_hibernate_exit; /* base */
|
||||
el2_vectors += hibernate_el2_vectors -
|
||||
__hibernate_exit_text_start; /* offset */
|
||||
|
||||
__hyp_set_vectors(el2_vectors);
|
||||
}
|
||||
|
||||
/*
|
||||
* We need a zero page that is zero before & after resume in order to
|
||||
* break before make on the ttbr1 page tables.
|
||||
*/
|
||||
zero_page = (void *)get_safe_page(GFP_ATOMIC);
|
||||
|
||||
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
|
||||
resume_hdr.reenter_kernel, lm_restore_pblist,
|
||||
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
|
||||
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
|
||||
unsigned long action, void *ptr)
|
||||
{
|
||||
if (action == PM_HIBERNATION_PREPARE &&
|
||||
cpumask_first(cpu_online_mask) != 0) {
|
||||
pr_warn("CPU0 is offline.\n");
|
||||
return notifier_from_errno(-ENODEV);
|
||||
}
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int __init check_boot_cpu_online_init(void)
|
||||
{
|
||||
/*
|
||||
* Set this pm_notifier callback with a lower priority than
|
||||
* cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
|
||||
* called earlier to disable cpu hotplug before the cpu online check.
|
||||
*/
|
||||
pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(check_boot_cpu_online_init);
|
|
@ -20,7 +20,6 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/psci.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <uapi/linux/psci.h>
|
||||
|
||||
|
@ -28,73 +27,6 @@
|
|||
#include <asm/cpu_ops.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/suspend.h>
|
||||
|
||||
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
|
||||
|
||||
static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
|
||||
{
|
||||
int i, ret, count = 0;
|
||||
u32 *psci_states;
|
||||
struct device_node *state_node, *cpu_node;
|
||||
|
||||
cpu_node = of_get_cpu_node(cpu, NULL);
|
||||
if (!cpu_node)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* If the PSCI cpu_suspend function hook has not been initialized
|
||||
* idle states must not be enabled, so bail out
|
||||
*/
|
||||
if (!psci_ops.cpu_suspend)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Count idle states */
|
||||
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
|
||||
count))) {
|
||||
count++;
|
||||
of_node_put(state_node);
|
||||
}
|
||||
|
||||
if (!count)
|
||||
return -ENODEV;
|
||||
|
||||
psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
|
||||
if (!psci_states)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
u32 state;
|
||||
|
||||
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
|
||||
|
||||
ret = of_property_read_u32(state_node,
|
||||
"arm,psci-suspend-param",
|
||||
&state);
|
||||
if (ret) {
|
||||
pr_warn(" * %s missing arm,psci-suspend-param property\n",
|
||||
state_node->full_name);
|
||||
of_node_put(state_node);
|
||||
goto free_mem;
|
||||
}
|
||||
|
||||
of_node_put(state_node);
|
||||
pr_debug("psci-power-state %#x index %d\n", state, i);
|
||||
if (!psci_power_state_is_valid(state)) {
|
||||
pr_warn("Invalid PSCI power state %#x\n", state);
|
||||
ret = -EINVAL;
|
||||
goto free_mem;
|
||||
}
|
||||
psci_states[i] = state;
|
||||
}
|
||||
/* Idle states parsed correctly, initialize per-cpu pointer */
|
||||
per_cpu(psci_power_state, cpu) = psci_states;
|
||||
return 0;
|
||||
|
||||
free_mem:
|
||||
kfree(psci_states);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init cpu_psci_cpu_init(unsigned int cpu)
|
||||
{
|
||||
|
@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
|
|||
}
|
||||
#endif
|
||||
|
||||
static int psci_suspend_finisher(unsigned long index)
|
||||
{
|
||||
u32 *state = __this_cpu_read(psci_power_state);
|
||||
|
||||
return psci_ops.cpu_suspend(state[index - 1],
|
||||
virt_to_phys(cpu_resume));
|
||||
}
|
||||
|
||||
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
|
||||
{
|
||||
int ret;
|
||||
u32 *state = __this_cpu_read(psci_power_state);
|
||||
/*
|
||||
* idle state index 0 corresponds to wfi, should never be called
|
||||
* from the cpu_suspend operations
|
||||
*/
|
||||
if (WARN_ON_ONCE(!index))
|
||||
return -EINVAL;
|
||||
|
||||
if (!psci_power_state_loses_context(state[index - 1]))
|
||||
ret = psci_ops.cpu_suspend(state[index - 1], 0);
|
||||
else
|
||||
ret = cpu_suspend(index, psci_suspend_finisher);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const struct cpu_operations cpu_psci_ops = {
|
||||
.name = "psci",
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
.cpu_init_idle = cpu_psci_cpu_init_idle,
|
||||
.cpu_suspend = cpu_psci_cpu_suspend,
|
||||
.cpu_init_idle = psci_cpu_init_idle,
|
||||
.cpu_suspend = psci_cpu_suspend_enter,
|
||||
#endif
|
||||
.cpu_init = cpu_psci_cpu_init,
|
||||
.cpu_prepare = cpu_psci_cpu_prepare,
|
||||
|
|
|
@ -175,7 +175,6 @@ static void __init smp_build_mpidr_hash(void)
|
|||
*/
|
||||
if (mpidr_hash_size() > 4 * num_possible_cpus())
|
||||
pr_warn("Large number of MPIDR hash buckets detected\n");
|
||||
__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
|
||||
}
|
||||
|
||||
static void __init setup_machine_fdt(phys_addr_t dt_phys)
|
||||
|
|
|
@ -49,39 +49,32 @@
|
|||
orr \dst, \dst, \mask // dst|=(aff3>>rs3)
|
||||
.endm
|
||||
/*
|
||||
* Save CPU state for a suspend and execute the suspend finisher.
|
||||
* On success it will return 0 through cpu_resume - ie through a CPU
|
||||
* soft/hard reboot from the reset vector.
|
||||
* On failure it returns the suspend finisher return value or force
|
||||
* -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
|
||||
* is not allowed to return, if it does this must be considered failure).
|
||||
* It saves callee registers, and allocates space on the kernel stack
|
||||
* to save the CPU specific registers + some other data for resume.
|
||||
* Save CPU state in the provided sleep_stack_data area, and publish its
|
||||
* location for cpu_resume()'s use in sleep_save_stash.
|
||||
*
|
||||
* x0 = suspend finisher argument
|
||||
* x1 = suspend finisher function pointer
|
||||
* cpu_resume() will restore this saved state, and return. Because the
|
||||
* link-register is saved and restored, it will appear to return from this
|
||||
* function. So that the caller can tell the suspend/resume paths apart,
|
||||
* __cpu_suspend_enter() will always return a non-zero value, whereas the
|
||||
* path through cpu_resume() will return 0.
|
||||
*
|
||||
* x0 = struct sleep_stack_data area
|
||||
*/
|
||||
ENTRY(__cpu_suspend_enter)
|
||||
stp x29, lr, [sp, #-96]!
|
||||
stp x19, x20, [sp,#16]
|
||||
stp x21, x22, [sp,#32]
|
||||
stp x23, x24, [sp,#48]
|
||||
stp x25, x26, [sp,#64]
|
||||
stp x27, x28, [sp,#80]
|
||||
/*
|
||||
* Stash suspend finisher and its argument in x20 and x19
|
||||
*/
|
||||
mov x19, x0
|
||||
mov x20, x1
|
||||
stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
|
||||
stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
|
||||
stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
|
||||
stp x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
|
||||
stp x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
|
||||
stp x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
|
||||
|
||||
/* save the sp in cpu_suspend_ctx */
|
||||
mov x2, sp
|
||||
sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
|
||||
mov x0, sp
|
||||
/*
|
||||
* x0 now points to struct cpu_suspend_ctx allocated on the stack
|
||||
*/
|
||||
str x2, [x0, #CPU_CTX_SP]
|
||||
ldr x1, =sleep_save_sp
|
||||
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
|
||||
str x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
|
||||
|
||||
/* find the mpidr_hash */
|
||||
ldr x1, =sleep_save_stash
|
||||
ldr x1, [x1]
|
||||
mrs x7, mpidr_el1
|
||||
ldr x9, =mpidr_hash
|
||||
ldr x10, [x9, #MPIDR_HASH_MASK]
|
||||
|
@ -93,70 +86,28 @@ ENTRY(__cpu_suspend_enter)
|
|||
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
|
||||
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
|
||||
add x1, x1, x8, lsl #3
|
||||
bl __cpu_suspend_save
|
||||
/*
|
||||
* Grab suspend finisher in x20 and its argument in x19
|
||||
*/
|
||||
mov x0, x19
|
||||
mov x1, x20
|
||||
/*
|
||||
* We are ready for power down, fire off the suspend finisher
|
||||
* in x1, with argument in x0
|
||||
*/
|
||||
blr x1
|
||||
/*
|
||||
* Never gets here, unless suspend finisher fails.
|
||||
* Successful cpu_suspend should return from cpu_resume, returning
|
||||
* through this code path is considered an error
|
||||
* If the return value is set to 0 force x0 = -EOPNOTSUPP
|
||||
* to make sure a proper error condition is propagated
|
||||
*/
|
||||
cmp x0, #0
|
||||
mov x3, #-EOPNOTSUPP
|
||||
csel x0, x3, x0, eq
|
||||
add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
|
||||
ldp x19, x20, [sp, #16]
|
||||
ldp x21, x22, [sp, #32]
|
||||
ldp x23, x24, [sp, #48]
|
||||
ldp x25, x26, [sp, #64]
|
||||
ldp x27, x28, [sp, #80]
|
||||
ldp x29, lr, [sp], #96
|
||||
|
||||
str x0, [x1]
|
||||
add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
|
||||
stp x29, lr, [sp, #-16]!
|
||||
bl cpu_do_suspend
|
||||
ldp x29, lr, [sp], #16
|
||||
mov x0, #1
|
||||
ret
|
||||
ENDPROC(__cpu_suspend_enter)
|
||||
.ltorg
|
||||
|
||||
/*
|
||||
* x0 must contain the sctlr value retrieved from restored context
|
||||
*/
|
||||
.pushsection ".idmap.text", "ax"
|
||||
ENTRY(cpu_resume_mmu)
|
||||
ldr x3, =cpu_resume_after_mmu
|
||||
msr sctlr_el1, x0 // restore sctlr_el1
|
||||
isb
|
||||
/*
|
||||
* Invalidate the local I-cache so that any instructions fetched
|
||||
* speculatively from the PoC are discarded, since they may have
|
||||
* been dynamically patched at the PoU.
|
||||
*/
|
||||
ic iallu
|
||||
dsb nsh
|
||||
isb
|
||||
br x3 // global jump to virtual address
|
||||
ENDPROC(cpu_resume_mmu)
|
||||
.popsection
|
||||
cpu_resume_after_mmu:
|
||||
mov x0, #0 // return zero on success
|
||||
ldp x19, x20, [sp, #16]
|
||||
ldp x21, x22, [sp, #32]
|
||||
ldp x23, x24, [sp, #48]
|
||||
ldp x25, x26, [sp, #64]
|
||||
ldp x27, x28, [sp, #80]
|
||||
ldp x29, lr, [sp], #96
|
||||
ret
|
||||
ENDPROC(cpu_resume_after_mmu)
|
||||
|
||||
ENTRY(cpu_resume)
|
||||
bl el2_setup // if in EL2 drop to EL1 cleanly
|
||||
/* enable the MMU early - so we can access sleep_save_stash by va */
|
||||
adr_l lr, __enable_mmu /* __cpu_setup will return here */
|
||||
ldr x27, =_cpu_resume /* __enable_mmu will branch here */
|
||||
adrp x25, idmap_pg_dir
|
||||
adrp x26, swapper_pg_dir
|
||||
b __cpu_setup
|
||||
ENDPROC(cpu_resume)
|
||||
|
||||
ENTRY(_cpu_resume)
|
||||
mrs x1, mpidr_el1
|
||||
adrp x8, mpidr_hash
|
||||
add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
|
||||
|
@ -166,20 +117,27 @@ ENTRY(cpu_resume)
|
|||
ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
|
||||
compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
|
||||
/* x7 contains hash index, let's use it to grab context pointer */
|
||||
ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
|
||||
ldr_l x0, sleep_save_stash
|
||||
ldr x0, [x0, x7, lsl #3]
|
||||
add x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
|
||||
add x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
|
||||
/* load sp from context */
|
||||
ldr x2, [x0, #CPU_CTX_SP]
|
||||
/* load physical address of identity map page table in x1 */
|
||||
adrp x1, idmap_pg_dir
|
||||
mov sp, x2
|
||||
/* save thread_info */
|
||||
and x2, x2, #~(THREAD_SIZE - 1)
|
||||
msr sp_el0, x2
|
||||
/*
|
||||
* cpu_do_resume expects x0 to contain context physical address
|
||||
* pointer and x1 to contain physical address of 1:1 page tables
|
||||
* cpu_do_resume expects x0 to contain context address pointer
|
||||
*/
|
||||
bl cpu_do_resume // PC relative jump, MMU off
|
||||
b cpu_resume_mmu // Resume MMU, never returns
|
||||
ENDPROC(cpu_resume)
|
||||
bl cpu_do_resume
|
||||
|
||||
ldp x19, x20, [x29, #16]
|
||||
ldp x21, x22, [x29, #32]
|
||||
ldp x23, x24, [x29, #48]
|
||||
ldp x25, x26, [x29, #64]
|
||||
ldp x27, x28, [x29, #80]
|
||||
ldp x29, lr, [x29]
|
||||
mov x0, #0
|
||||
ret
|
||||
ENDPROC(_cpu_resume)
|
||||
|
|
arch/arm64/kernel/smccc-call.S (new file, 43 lines added)
|
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License Version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
.macro SMCCC instr
|
||||
.cfi_startproc
|
||||
\instr #0
|
||||
ldr x4, [sp]
|
||||
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
|
||||
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
|
||||
ret
|
||||
.cfi_endproc
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
* unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
|
||||
*/
|
||||
ENTRY(arm_smccc_smc)
|
||||
SMCCC smc
|
||||
ENDPROC(arm_smccc_smc)
|
||||
|
||||
/*
|
||||
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
|
||||
* unsigned long a3, unsigned long a4, unsigned long a5,
|
||||
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
|
||||
*/
|
||||
ENTRY(arm_smccc_hvc)
|
||||
SMCCC hvc
|
||||
ENDPROC(arm_smccc_hvc)
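For illustration only (not part of this diff), a caller-side sketch of how the arm_smccc_smc() wrapper declared above might be used from C. It assumes the prototype shown in the comment and the struct arm_smccc_res result fields (a0..a3) from <linux/arm-smccc.h>; the PSCI_VERSION function ID (0x84000000) is only an example value:

	#include <linux/arm-smccc.h>

	static unsigned long example_psci_version(void)
	{
		struct arm_smccc_res res;

		/* x0-x7 carry the SMC arguments; results come back in res.a0..a3 */
		arm_smccc_smc(0x84000000 /* PSCI_VERSION, example only */,
			      0, 0, 0, 0, 0, 0, 0, &res);
		return res.a0;
	}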
|
|
@ -10,30 +10,11 @@
|
|||
#include <asm/suspend.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
|
||||
/*
|
||||
* This is called by __cpu_suspend_enter() to save the state, and do whatever
|
||||
* flushing is required to ensure that when the CPU goes to sleep we have
|
||||
* the necessary data available when the caches are not searched.
|
||||
*
|
||||
* ptr: CPU context virtual address
|
||||
* save_ptr: address of the location where the context physical address
|
||||
* must be saved
|
||||
* This is allocated by cpu_suspend_init(), and used to store a pointer to
|
||||
* the 'struct sleep_stack_data' that contains a particular CPU's state.
|
||||
*/
|
||||
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
|
||||
phys_addr_t *save_ptr)
|
||||
{
|
||||
*save_ptr = virt_to_phys(ptr);
|
||||
|
||||
cpu_do_suspend(ptr);
|
||||
/*
|
||||
* Only flush the context that must be retrieved with the MMU
|
||||
* off. VA primitives ensure the flush is applied to all
|
||||
* cache levels so context is pushed to DRAM.
|
||||
*/
|
||||
__flush_dcache_area(ptr, sizeof(*ptr));
|
||||
__flush_dcache_area(save_ptr, sizeof(*save_ptr));
|
||||
}
|
||||
unsigned long *sleep_save_stash;
|
||||
|
||||
/*
|
||||
* This hook is provided so that cpu_suspend code can restore HW
|
||||
|
@ -51,40 +32,8 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
|
|||
hw_breakpoint_restore = hw_bp_restore;
|
||||
}
|
||||
|
||||
/*
|
||||
* cpu_suspend
|
||||
*
|
||||
* arg: argument to pass to the finisher function
|
||||
* fn: finisher function pointer
|
||||
*
|
||||
*/
|
||||
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
||||
void notrace __cpu_suspend_exit(void)
|
||||
{
|
||||
int ret;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* From this point debug exceptions are disabled to prevent
|
||||
* updates to mdscr register (saved and restored along with
|
||||
* general purpose registers) from kernel debuggers.
|
||||
*/
|
||||
local_dbg_save(flags);
|
||||
|
||||
/*
|
||||
* Function graph tracer state gets inconsistent when the kernel
|
||||
* calls functions that never return (aka suspend finishers) hence
|
||||
* disable graph tracing during their execution.
|
||||
*/
|
||||
pause_graph_tracing();
|
||||
|
||||
/*
|
||||
* mm context saved on the stack, it will be restored when
|
||||
* the cpu comes out of reset through the identity mapped
|
||||
* page tables, so that the thread address space is properly
|
||||
* set-up on function return.
|
||||
*/
|
||||
ret = __cpu_suspend_enter(arg, fn);
|
||||
if (ret == 0) {
|
||||
/*
|
||||
* We are resuming from reset with the idmap active in TTBR0_EL1.
|
||||
* We must uninstall the idmap and restore the expected MMU
|
||||
|
@ -107,6 +56,50 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
hw_breakpoint_restore(NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* cpu_suspend
|
||||
*
|
||||
* arg: argument to pass to the finisher function
|
||||
* fn: finisher function pointer
|
||||
*
|
||||
*/
|
||||
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
struct sleep_stack_data state;
|
||||
|
||||
/*
|
||||
* From this point debug exceptions are disabled to prevent
|
||||
* updates to mdscr register (saved and restored along with
|
||||
* general purpose registers) from kernel debuggers.
|
||||
*/
|
||||
local_dbg_save(flags);
|
||||
|
||||
/*
|
||||
* Function graph tracer state gets inconsistent when the kernel
|
||||
* calls functions that never return (aka suspend finishers) hence
|
||||
* disable graph tracing during their execution.
|
||||
*/
|
||||
pause_graph_tracing();
|
||||
|
||||
if (__cpu_suspend_enter(&state)) {
|
||||
/* Call the suspend finisher */
|
||||
ret = fn(arg);
|
||||
|
||||
/*
|
||||
* Never gets here, unless the suspend finisher fails.
|
||||
* Successful cpu_suspend() should return from cpu_resume(),
|
||||
* returning through this code path is considered an error
|
||||
* If the return value is set to 0 force ret = -EOPNOTSUPP
|
||||
* to make sure a proper error condition is propagated
|
||||
*/
|
||||
if (!ret)
|
||||
ret = -EOPNOTSUPP;
|
||||
} else {
|
||||
__cpu_suspend_exit();
|
||||
}
|
||||
|
||||
unpause_graph_tracing();
|
||||
|
||||
/*
|
||||
|
@ -119,22 +112,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|||
return ret;
|
||||
}
|
||||
|
||||
struct sleep_save_sp sleep_save_sp;
|
||||
|
||||
static int __init cpu_suspend_init(void)
|
||||
{
|
||||
void *ctx_ptr;
|
||||
|
||||
/* ctx_ptr is an array of physical addresses */
|
||||
ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
|
||||
sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (WARN_ON(!ctx_ptr))
|
||||
if (WARN_ON(!sleep_save_stash))
|
||||
return -ENOMEM;
|
||||
|
||||
sleep_save_sp.save_ptr_stash = ctx_ptr;
|
||||
sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
|
||||
__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(cpu_suspend_init);
|
||||
|
|
|
@ -46,6 +46,16 @@ jiffies = jiffies_64;
|
|||
*(.idmap.text) \
|
||||
VMLINUX_SYMBOL(__idmap_text_end) = .;
|
||||
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
#define HIBERNATE_TEXT \
|
||||
. = ALIGN(SZ_4K); \
|
||||
VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
|
||||
*(.hibernate_exit.text) \
|
||||
VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
|
||||
#else
|
||||
#define HIBERNATE_TEXT
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The size of the PE/COFF section that covers the kernel image, which
|
||||
* runs from stext to _edata, must be a round multiple of the PE/COFF
|
||||
|
@ -115,6 +125,7 @@ SECTIONS
|
|||
KPROBES_TEXT
|
||||
HYPERVISOR_TEXT
|
||||
IDMAP_TEXT
|
||||
HIBERNATE_TEXT
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
. = ALIGN(16);
|
||||
|
@ -197,6 +208,10 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
|
|||
"HYP init code too big or misaligned")
|
||||
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
|
||||
"ID map text too big or misaligned")
|
||||
#ifdef CONFIG_HIBERNATION
|
||||
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
|
||||
<= SZ_4K, "Hibernate exit text too big or misaligned")
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If padding is applied before .head.text, virt<->phys conversions will fail.
|
||||
|
|
|
@ -10,6 +10,7 @@ KVM=../../../virt/kvm
|
|||
ARM=../../../arch/arm/kvm
|
||||
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
|
||||
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
|
||||
|
@ -22,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi
|
|||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include <asm/cputype.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/esr.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
.text
|
||||
.pushsection .hyp.idmap.text, "ax"
|
||||
|
@ -96,6 +97,14 @@ __do_hyp_init:
|
|||
|
||||
ldr x4, =VTCR_EL2_FLAGS
|
||||
bfi x4, x5, #16, #3
|
||||
/*
|
||||
* Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
|
||||
* VTCR_EL2.
|
||||
*/
|
||||
mrs x5, ID_AA64MMFR1_EL1
|
||||
ubfx x5, x5, #5, #1
|
||||
lsl x5, x5, #VTCR_EL2_VS
|
||||
orr x4, x4, x5
|
||||
|
||||
msr vtcr_el2, x4
|
||||
|
||||
|
@ -108,8 +117,8 @@ __do_hyp_init:
|
|||
dsb sy
|
||||
|
||||
mrs x4, sctlr_el2
|
||||
and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
|
||||
ldr x5, =SCTLR_EL2_FLAGS
|
||||
and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
|
||||
ldr x5, =SCTLR_ELx_FLAGS
|
||||
orr x4, x4, x5
|
||||
msr sctlr_el2, x4
|
||||
isb
|
||||
|
|
arch/arm64/kvm/hyp.S (1082 lines changed; diff suppressed because it is too large)
arch/arm64/kvm/hyp/Makefile (new file, 14 lines added)
|
@ -0,0 +1,14 @@
|
|||
#
|
||||
# Makefile for Kernel-based Virtual Machine module, HYP part
|
||||
#
|
||||
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
|
arch/arm64/kvm/hyp/debug-sr.c (new file, 141 lines added)
|
@ -0,0 +1,141 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#include "hyp.h"
|
||||
|
||||
#define read_debug(r,n) read_sysreg(r##n##_el1)
|
||||
#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
|
||||
|
||||
#define save_debug(ptr,reg,nr) \
|
||||
switch (nr) { \
|
||||
case 15: ptr[15] = read_debug(reg, 15); \
|
||||
case 14: ptr[14] = read_debug(reg, 14); \
|
||||
case 13: ptr[13] = read_debug(reg, 13); \
|
||||
case 12: ptr[12] = read_debug(reg, 12); \
|
||||
case 11: ptr[11] = read_debug(reg, 11); \
|
||||
case 10: ptr[10] = read_debug(reg, 10); \
|
||||
case 9: ptr[9] = read_debug(reg, 9); \
|
||||
case 8: ptr[8] = read_debug(reg, 8); \
|
||||
case 7: ptr[7] = read_debug(reg, 7); \
|
||||
case 6: ptr[6] = read_debug(reg, 6); \
|
||||
case 5: ptr[5] = read_debug(reg, 5); \
|
||||
case 4: ptr[4] = read_debug(reg, 4); \
|
||||
case 3: ptr[3] = read_debug(reg, 3); \
|
||||
case 2: ptr[2] = read_debug(reg, 2); \
|
||||
case 1: ptr[1] = read_debug(reg, 1); \
|
||||
default: ptr[0] = read_debug(reg, 0); \
|
||||
}
|
||||
|
||||
#define restore_debug(ptr,reg,nr) \
|
||||
switch (nr) { \
|
||||
case 15: write_debug(ptr[15], reg, 15); \
|
||||
case 14: write_debug(ptr[14], reg, 14); \
|
||||
case 13: write_debug(ptr[13], reg, 13); \
|
||||
case 12: write_debug(ptr[12], reg, 12); \
|
||||
case 11: write_debug(ptr[11], reg, 11); \
|
||||
case 10: write_debug(ptr[10], reg, 10); \
|
||||
case 9: write_debug(ptr[9], reg, 9); \
|
||||
case 8: write_debug(ptr[8], reg, 8); \
|
||||
case 7: write_debug(ptr[7], reg, 7); \
|
||||
case 6: write_debug(ptr[6], reg, 6); \
|
||||
case 5: write_debug(ptr[5], reg, 5); \
|
||||
case 4: write_debug(ptr[4], reg, 4); \
|
||||
case 3: write_debug(ptr[3], reg, 3); \
|
||||
case 2: write_debug(ptr[2], reg, 2); \
|
||||
case 1: write_debug(ptr[1], reg, 1); \
|
||||
default: write_debug(ptr[0], reg, 0); \
|
||||
}
|
||||
|
||||
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
|
||||
struct kvm_guest_debug_arch *dbg,
|
||||
struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
u64 aa64dfr0;
|
||||
int brps, wrps;
|
||||
|
||||
if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
|
||||
return;
|
||||
|
||||
aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
|
||||
brps = (aa64dfr0 >> 12) & 0xf;
|
||||
wrps = (aa64dfr0 >> 20) & 0xf;
|
||||
|
||||
save_debug(dbg->dbg_bcr, dbgbcr, brps);
|
||||
save_debug(dbg->dbg_bvr, dbgbvr, brps);
|
||||
save_debug(dbg->dbg_wcr, dbgwcr, wrps);
|
||||
save_debug(dbg->dbg_wvr, dbgwvr, wrps);
|
||||
|
||||
ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
|
||||
}
|
||||
|
||||
void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
|
||||
struct kvm_guest_debug_arch *dbg,
|
||||
struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
u64 aa64dfr0;
|
||||
int brps, wrps;
|
||||
|
||||
if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
|
||||
return;
|
||||
|
||||
aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
|
||||
|
||||
brps = (aa64dfr0 >> 12) & 0xf;
|
||||
wrps = (aa64dfr0 >> 20) & 0xf;
|
||||
|
||||
restore_debug(dbg->dbg_bcr, dbgbcr, brps);
|
||||
restore_debug(dbg->dbg_bvr, dbgbvr, brps);
|
||||
restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
|
||||
restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
|
||||
|
||||
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
|
||||
}
|
||||
|
||||
void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
|
||||
* a full save/restore cycle. */
|
||||
if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
|
||||
(vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
|
||||
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
|
||||
|
||||
__debug_save_state(vcpu, &vcpu->arch.host_debug_state,
|
||||
kern_hyp_va(vcpu->arch.host_cpu_context));
|
||||
}
|
||||
|
||||
void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
|
||||
kern_hyp_va(vcpu->arch.host_cpu_context));
|
||||
|
||||
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
|
||||
vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
|
||||
}
|
||||
|
||||
static u32 __hyp_text __debug_read_mdcr_el2(void)
|
||||
{
|
||||
return read_sysreg(mdcr_el2);
|
||||
}
|
||||
|
||||
__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
|
arch/arm64/kvm/hyp/entry.S (new file, 160 lines added)
|
@ -0,0 +1,160 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/fpsimdmacros.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
|
||||
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
.macro save_callee_saved_regs ctxt
|
||||
stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
|
||||
stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
|
||||
stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
|
||||
stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
|
||||
stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
|
||||
stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
|
||||
.endm
|
||||
|
||||
.macro restore_callee_saved_regs ctxt
|
||||
ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
|
||||
ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
|
||||
ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
|
||||
ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
|
||||
ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
|
||||
ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
|
||||
.endm
|
||||
|
||||
/*
|
||||
* u64 __guest_enter(struct kvm_vcpu *vcpu,
|
||||
* struct kvm_cpu_context *host_ctxt);
|
||||
*/
|
||||
ENTRY(__guest_enter)
|
||||
// x0: vcpu
|
||||
// x1: host/guest context
|
||||
// x2-x18: clobbered by macros
|
||||
|
||||
// Store the host regs
|
||||
save_callee_saved_regs x1
|
||||
|
||||
// Preserve vcpu & host_ctxt for use at exit time
|
||||
stp x0, x1, [sp, #-16]!
|
||||
|
||||
add x1, x0, #VCPU_CONTEXT
|
||||
|
||||
// Prepare x0-x1 for later restore by pushing them onto the stack
|
||||
ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
|
||||
stp x2, x3, [sp, #-16]!
|
||||
|
||||
// x2-x18
|
||||
ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
|
||||
ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
|
||||
ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
|
||||
ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
|
||||
ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
|
||||
ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
|
||||
ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
|
||||
ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
|
||||
ldr x18, [x1, #CPU_XREG_OFFSET(18)]
|
||||
|
||||
// x19-x29, lr
|
||||
restore_callee_saved_regs x1
|
||||
|
||||
// Last bits of the 64bit state
|
||||
ldp x0, x1, [sp], #16
|
||||
|
||||
// Do not touch any register after this!
|
||||
eret
|
||||
ENDPROC(__guest_enter)
|
||||
|
||||
ENTRY(__guest_exit)
|
||||
// x0: vcpu
|
||||
// x1: return code
|
||||
// x2-x3: free
|
||||
// x4-x29,lr: vcpu regs
|
||||
// vcpu x0-x3 on the stack
|
||||
|
||||
add x2, x0, #VCPU_CONTEXT
|
||||
|
||||
stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
|
||||
stp x6, x7, [x2, #CPU_XREG_OFFSET(6)]
|
||||
stp x8, x9, [x2, #CPU_XREG_OFFSET(8)]
|
||||
stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
|
||||
stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
|
||||
stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
|
||||
stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
|
||||
str x18, [x2, #CPU_XREG_OFFSET(18)]
|
||||
|
||||
ldp x6, x7, [sp], #16 // x2, x3
|
||||
ldp x4, x5, [sp], #16 // x0, x1
|
||||
|
||||
stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
|
||||
stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
|
||||
|
||||
save_callee_saved_regs x2
|
||||
|
||||
// Restore vcpu & host_ctxt from the stack
|
||||
// (preserving return code in x1)
|
||||
ldp x0, x2, [sp], #16
|
||||
// Now restore the host regs
|
||||
restore_callee_saved_regs x2
|
||||
|
||||
mov x0, x1
|
||||
ret
|
||||
ENDPROC(__guest_exit)
|
||||
|
||||
ENTRY(__fpsimd_guest_restore)
|
||||
stp x4, lr, [sp, #-16]!
|
||||
|
||||
mrs x2, cptr_el2
|
||||
bic x2, x2, #CPTR_EL2_TFP
|
||||
msr cptr_el2, x2
|
||||
isb
|
||||
|
||||
mrs x3, tpidr_el2
|
||||
|
||||
ldr x0, [x3, #VCPU_HOST_CONTEXT]
|
||||
kern_hyp_va x0
|
||||
add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
|
||||
bl __fpsimd_save_state
|
||||
|
||||
add x2, x3, #VCPU_CONTEXT
|
||||
add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
|
||||
bl __fpsimd_restore_state
|
||||
|
||||
// Skip restoring fpexc32 for AArch64 guests
|
||||
mrs x1, hcr_el2
|
||||
tbnz x1, #HCR_RW_SHIFT, 1f
|
||||
ldr x4, [x3, #VCPU_FPEXC32_EL2]
|
||||
msr fpexc32_el2, x4
|
||||
1:
|
||||
ldp x4, lr, [sp], #16
|
||||
ldp x2, x3, [sp], #16
|
||||
ldp x0, x1, [sp], #16
|
||||
|
||||
eret
|
||||
ENDPROC(__fpsimd_guest_restore)
|
|
@ -1,4 +1,7 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
|
@ -8,21 +11,23 @@
|
|||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Copyright (C) 2015 ARM Limited
|
||||
*
|
||||
* Author: Will Deacon <will.deacon@arm.com>
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
|
||||
ENTRY(__invoke_psci_fn_hvc)
|
||||
hvc #0
|
||||
ret
|
||||
ENDPROC(__invoke_psci_fn_hvc)
|
||||
#include <asm/fpsimdmacros.h>
|
||||
|
||||
/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
|
||||
ENTRY(__invoke_psci_fn_smc)
|
||||
smc #0
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
ENTRY(__fpsimd_save_state)
|
||||
fpsimd_save x0, 1
|
||||
ret
|
||||
ENDPROC(__invoke_psci_fn_smc)
|
||||
ENDPROC(__fpsimd_save_state)
|
||||
|
||||
ENTRY(__fpsimd_restore_state)
|
||||
fpsimd_restore x0, 1
|
||||
ret
|
||||
ENDPROC(__fpsimd_restore_state)
|
arch/arm64/kvm/hyp/hyp-entry.S (new file, 234 lines added)
|
@ -0,0 +1,234 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
.macro save_x0_to_x3
|
||||
stp x0, x1, [sp, #-16]!
|
||||
stp x2, x3, [sp, #-16]!
|
||||
.endm
|
||||
|
||||
.macro restore_x0_to_x3
|
||||
ldp x2, x3, [sp], #16
|
||||
ldp x0, x1, [sp], #16
|
||||
.endm
|
||||
|
||||
.macro do_el2_call
|
||||
/*
|
||||
* Shuffle the parameters before calling the function
|
||||
* pointed to in x0. Assumes parameters in x[1,2,3].
|
||||
*/
|
||||
sub sp, sp, #16
|
||||
str lr, [sp]
|
||||
mov lr, x0
|
||||
mov x0, x1
|
||||
mov x1, x2
|
||||
mov x2, x3
|
||||
blr lr
|
||||
ldr lr, [sp]
|
||||
add sp, sp, #16
|
||||
.endm
|
||||
|
||||
ENTRY(__vhe_hyp_call)
|
||||
do_el2_call
|
||||
/*
|
||||
* We used to rely on having an exception return to get
|
||||
* an implicit isb. In the E2H case, we don't have it anymore.
|
||||
* Rather than changing all the leaf functions, just do it here
|
||||
* before returning to the rest of the kernel.
|
||||
*/
|
||||
isb
|
||||
ret
|
||||
ENDPROC(__vhe_hyp_call)
|
||||
|
||||
el1_sync: // Guest trapped into EL2
|
||||
save_x0_to_x3
|
||||
|
||||
mrs x1, esr_el2
|
||||
lsr x2, x1, #ESR_ELx_EC_SHIFT
|
||||
|
||||
cmp x2, #ESR_ELx_EC_HVC64
|
||||
b.ne el1_trap
|
||||
|
||||
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
|
||||
cbnz x3, el1_trap // called HVC
|
||||
|
||||
/* Here, we're pretty sure the host called HVC. */
|
||||
restore_x0_to_x3
|
||||
|
||||
/* Check for __hyp_get_vectors */
|
||||
cbnz x0, 1f
|
||||
mrs x0, vbar_el2
|
||||
b 2f
|
||||
|
||||
1:
|
||||
/*
|
||||
* Perform the EL2 call
|
||||
*/
|
||||
kern_hyp_va x0
|
||||
do_el2_call
|
||||
|
||||
2: eret
|
||||
|
||||
el1_trap:
|
||||
/*
|
||||
* x1: ESR
|
||||
* x2: ESR_EC
|
||||
*/
|
||||
|
||||
/* Guest accessed VFP/SIMD registers, save host, restore Guest */
|
||||
cmp x2, #ESR_ELx_EC_FP_ASIMD
|
||||
b.eq __fpsimd_guest_restore
|
||||
|
||||
cmp x2, #ESR_ELx_EC_DABT_LOW
|
||||
mov x0, #ESR_ELx_EC_IABT_LOW
|
||||
ccmp x2, x0, #4, ne
|
||||
b.ne 1f // Not an abort we care about
|
||||
|
||||
/* This is an abort. Check for permission fault */
|
||||
alternative_if_not ARM64_WORKAROUND_834220
|
||||
and x2, x1, #ESR_ELx_FSC_TYPE
|
||||
cmp x2, #FSC_PERM
|
||||
b.ne 1f // Not a permission fault
|
||||
alternative_else
|
||||
nop // Use the permission fault path to
|
||||
nop // check for a valid S1 translation,
|
||||
nop // regardless of the ESR value.
|
||||
alternative_endif
|
||||
|
||||
/*
|
||||
* Check for Stage-1 page table walk, which is guaranteed
|
||||
* to give a valid HPFAR_EL2.
|
||||
*/
|
||||
tbnz x1, #7, 1f // S1PTW is set
|
||||
|
||||
/* Preserve PAR_EL1 */
|
||||
mrs x3, par_el1
|
||||
stp x3, xzr, [sp, #-16]!
|
||||
|
||||
/*
|
||||
* Permission fault, HPFAR_EL2 is invalid.
|
||||
* Resolve the IPA the hard way using the guest VA.
|
||||
* Stage-1 translation already validated the memory access rights.
|
||||
* As such, we can use the EL1 translation regime, and don't have
|
||||
* to distinguish between EL0 and EL1 access.
|
||||
*/
|
||||
mrs x2, far_el2
|
||||
at s1e1r, x2
|
||||
isb
|
||||
|
||||
/* Read result */
|
||||
mrs x3, par_el1
|
||||
ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack
|
||||
msr par_el1, x0
|
||||
tbnz x3, #0, 3f // Bail out if we failed the translation
|
||||
ubfx x3, x3, #12, #36 // Extract IPA
|
||||
lsl x3, x3, #4 // and present it like HPFAR
|
||||
b 2f
|
||||
|
||||
1: mrs x3, hpfar_el2
|
||||
mrs x2, far_el2
|
||||
|
||||
2: mrs x0, tpidr_el2
|
||||
str w1, [x0, #VCPU_ESR_EL2]
|
||||
str x2, [x0, #VCPU_FAR_EL2]
|
||||
str x3, [x0, #VCPU_HPFAR_EL2]
|
||||
|
||||
mov x1, #ARM_EXCEPTION_TRAP
|
||||
b __guest_exit
|
||||
|
||||
/*
|
||||
* Translation failed. Just return to the guest and
|
||||
* let it fault again. Another CPU is probably playing
|
||||
* behind our back.
|
||||
*/
|
||||
3: restore_x0_to_x3
|
||||
|
||||
eret
|
||||
|
||||
el1_irq:
|
||||
save_x0_to_x3
|
||||
mrs x0, tpidr_el2
|
||||
mov x1, #ARM_EXCEPTION_IRQ
|
||||
b __guest_exit
|
||||
|
||||
ENTRY(__hyp_do_panic)
|
||||
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
|
||||
PSR_MODE_EL1h)
|
||||
msr spsr_el2, lr
|
||||
ldr lr, =panic
|
||||
msr elr_el2, lr
|
||||
eret
|
||||
ENDPROC(__hyp_do_panic)
|
||||
|
||||
.macro invalid_vector label, target = __hyp_panic
|
||||
.align 2
|
||||
\label:
|
||||
b \target
|
||||
ENDPROC(\label)
|
||||
.endm
|
||||
|
||||
/* None of these should ever happen */
|
||||
invalid_vector el2t_sync_invalid
|
||||
invalid_vector el2t_irq_invalid
|
||||
invalid_vector el2t_fiq_invalid
|
||||
invalid_vector el2t_error_invalid
|
||||
invalid_vector el2h_sync_invalid
|
||||
invalid_vector el2h_irq_invalid
|
||||
invalid_vector el2h_fiq_invalid
|
||||
invalid_vector el2h_error_invalid
|
||||
invalid_vector el1_sync_invalid
|
||||
invalid_vector el1_irq_invalid
|
||||
invalid_vector el1_fiq_invalid
|
||||
invalid_vector el1_error_invalid
|
||||
|
||||
.ltorg
|
||||
|
||||
.align 11
|
||||
|
||||
ENTRY(__kvm_hyp_vector)
|
||||
ventry el2t_sync_invalid // Synchronous EL2t
|
||||
ventry el2t_irq_invalid // IRQ EL2t
|
||||
ventry el2t_fiq_invalid // FIQ EL2t
|
||||
ventry el2t_error_invalid // Error EL2t
|
||||
|
||||
ventry el2h_sync_invalid // Synchronous EL2h
|
||||
ventry el2h_irq_invalid // IRQ EL2h
|
||||
ventry el2h_fiq_invalid // FIQ EL2h
|
||||
ventry el2h_error_invalid // Error EL2h
|
||||
|
||||
ventry el1_sync // Synchronous 64-bit EL1
|
||||
ventry el1_irq // IRQ 64-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 64-bit EL1
|
||||
ventry el1_error_invalid // Error 64-bit EL1
|
||||
|
||||
ventry el1_sync // Synchronous 32-bit EL1
|
||||
ventry el1_irq // IRQ 32-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 32-bit EL1
|
||||
ventry el1_error_invalid // Error 32-bit EL1
|
||||
ENDPROC(__kvm_hyp_vector)
|
arch/arm64/kvm/hyp/hyp.h (new file, 90 lines added)
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ARM64_KVM_HYP_H__
|
||||
#define __ARM64_KVM_HYP_H__
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
#define __hyp_text __section(.hyp.text) notrace
|
||||
|
||||
#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
|
||||
#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
|
||||
+ PAGE_OFFSET)

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
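
For reference, a plain-C sketch of what this macro achieves at run time, without the boot-time ALTERNATIVE patching: a selector function returning one of two implementations. The feature test here is only a stand-in for the patched nop/mov sequence, and the names are illustrative.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the CPU feature test that ALTERNATIVE() patches away. */
static bool has_sysreg_gic_cpuif(void) { return false; }

static void save_state_v2(int *vcpu) { printf("v2 save %d\n", *vcpu); }
static void save_state_v3(int *vcpu) { printf("v3 save %d\n", *vcpu); }

/* Rough equivalent of:
 *   static hyp_alternate_select(__vgic_call_save_state,
 *                               __vgic_v2_save_state, __vgic_v3_save_state,
 *                               ARM64_HAS_SYSREG_GIC_CPUIF);
 */
static void (*__vgic_call_save_state(void))(int *)
{
	return has_sysreg_gic_cpuif() ? save_state_v3 : save_state_v2;
}

int main(void)
{
	int vcpu = 0;

	/* The call site mirrors the real one: __vgic_call_save_state()(vcpu); */
	__vgic_call_save_state()(&vcpu);
	return 0;
}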

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
static inline bool __fpsimd_enabled(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */
arch/arm64/kvm/hyp/switch.c (new file, 175 lines)
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "hyp.h"
|
||||
|
||||
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* We are about to set CPTR_EL2.TFP to trap all floating point
|
||||
* register accesses to EL2, however, the ARM ARM clearly states that
|
||||
* traps are only taken to EL2 if the operation would not otherwise
|
||||
* trap to EL1. Therefore, always make sure that for 32-bit guests,
|
||||
* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
|
||||
*/
|
||||
val = vcpu->arch.hcr_el2;
|
||||
if (!(val & HCR_RW)) {
|
||||
write_sysreg(1 << 30, fpexc32_el2);
|
||||
isb();
|
||||
}
|
||||
write_sysreg(val, hcr_el2);
|
||||
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
|
||||
write_sysreg(1 << 15, hstr_el2);
|
||||
write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
write_sysreg(HCR_RW, hcr_el2);
|
||||
write_sysreg(0, hstr_el2);
|
||||
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
|
||||
write_sysreg(0, cptr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
write_sysreg(0, vttbr_el2);
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__vgic_call_save_state,
|
||||
__vgic_v2_save_state, __vgic_v3_save_state,
|
||||
ARM64_HAS_SYSREG_GIC_CPUIF);
|
||||
|
||||
static hyp_alternate_select(__vgic_call_restore_state,
|
||||
__vgic_v2_restore_state, __vgic_v3_restore_state,
|
||||
ARM64_HAS_SYSREG_GIC_CPUIF);
|
||||
|
||||
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
__vgic_call_save_state()(vcpu);
|
||||
write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
val = read_sysreg(hcr_el2);
|
||||
val |= HCR_INT_OVERRIDE;
|
||||
val |= vcpu->arch.irq_lines;
|
||||
write_sysreg(val, hcr_el2);
|
||||
|
||||
__vgic_call_restore_state()(vcpu);
|
||||
}
|
||||
|
||||
static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
struct kvm_cpu_context *guest_ctxt;
|
||||
bool fp_enabled;
|
||||
u64 exit_code;
|
||||
|
||||
vcpu = kern_hyp_va(vcpu);
|
||||
write_sysreg(vcpu, tpidr_el2);
|
||||
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
guest_ctxt = &vcpu->arch.ctxt;
|
||||
|
||||
__sysreg_save_state(host_ctxt);
|
||||
__debug_cond_save_host_state(vcpu);
|
||||
|
||||
__activate_traps(vcpu);
|
||||
__activate_vm(vcpu);
|
||||
|
||||
__vgic_restore_state(vcpu);
|
||||
__timer_restore_state(vcpu);
|
||||
|
||||
/*
|
||||
* We must restore the 32-bit state before the sysregs, thanks
|
||||
* to Cortex-A57 erratum #852523.
|
||||
*/
|
||||
__sysreg32_restore_state(vcpu);
|
||||
__sysreg_restore_state(guest_ctxt);
|
||||
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
|
||||
|
||||
/* Jump in the fire! */
|
||||
exit_code = __guest_enter(vcpu, host_ctxt);
|
||||
/* And we're baaack! */
|
||||
|
||||
fp_enabled = __fpsimd_enabled();
|
||||
|
||||
__sysreg_save_state(guest_ctxt);
|
||||
__sysreg32_save_state(vcpu);
|
||||
__timer_save_state(vcpu);
|
||||
__vgic_save_state(vcpu);
|
||||
|
||||
__deactivate_traps(vcpu);
|
||||
__deactivate_vm(vcpu);
|
||||
|
||||
__sysreg_restore_state(host_ctxt);
|
||||
|
||||
if (fp_enabled) {
|
||||
__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
|
||||
__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
|
||||
}
|
||||
|
||||
__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
|
||||
__debug_cond_restore_host_state(vcpu);
|
||||
|
||||
return exit_code;
|
||||
}
|
||||
|
||||
__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
||||
|
||||
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
|
||||
|
||||
void __hyp_text __noreturn __hyp_panic(void)
|
||||
{
|
||||
unsigned long str_va = (unsigned long)__hyp_panic_string;
|
||||
u64 spsr = read_sysreg(spsr_el2);
|
||||
u64 elr = read_sysreg(elr_el2);
|
||||
u64 par = read_sysreg(par_el1);
|
||||
|
||||
if (read_sysreg(vttbr_el2)) {
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
|
||||
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
__deactivate_traps(vcpu);
|
||||
__deactivate_vm(vcpu);
|
||||
__sysreg_restore_state(host_ctxt);
|
||||
}
|
||||
|
||||
/* Call panic for real */
|
||||
__hyp_do_panic(hyp_kern_va(str_va),
|
||||
spsr, elr,
|
||||
read_sysreg(esr_el2), read_sysreg(far_el2),
|
||||
read_sysreg(hpfar_el2), par,
|
||||
(void *)read_sysreg(tpidr_el2));
|
||||
|
||||
unreachable();
|
||||
}
|
arch/arm64/kvm/hyp/sysreg-sr.c (new file, 138 lines)
|
@ -0,0 +1,138 @@
|
|||
/*
|
||||
* Copyright (C) 2012-2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#include "hyp.h"
|
||||
|
||||
/* ctxt is already in the HYP VA space */
|
||||
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
|
||||
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
|
||||
ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1);
|
||||
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
|
||||
ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1);
|
||||
ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1);
|
||||
ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1);
|
||||
ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
|
||||
ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
|
||||
ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1);
|
||||
ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1);
|
||||
ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
|
||||
ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1);
|
||||
ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1);
|
||||
ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1);
|
||||
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
|
||||
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
|
||||
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
|
||||
ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1);
|
||||
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
|
||||
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
|
||||
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
|
||||
|
||||
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
|
||||
ctxt->gp_regs.regs.pc = read_sysreg(elr_el2);
|
||||
ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2);
|
||||
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
|
||||
ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1);
|
||||
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
|
||||
}
|
||||
|
||||
void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
|
||||
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
|
||||
write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1);
|
||||
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
|
||||
write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1);
|
||||
write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1);
|
||||
write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1);
|
||||
write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
|
||||
write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
|
||||
write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1);
|
||||
write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1);
|
||||
write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
|
||||
write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1);
|
||||
write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1);
|
||||
write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
|
||||
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
|
||||
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
|
||||
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
|
||||
write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1);
|
||||
write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
|
||||
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
|
||||
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
|
||||
|
||||
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
|
||||
write_sysreg(ctxt->gp_regs.regs.pc, elr_el2);
|
||||
write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
|
||||
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
|
||||
write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
|
||||
write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
|
||||
}
|
||||
|
||||
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 *spsr, *sysreg;
|
||||
|
||||
if (read_sysreg(hcr_el2) & HCR_RW)
|
||||
return;
|
||||
|
||||
spsr = vcpu->arch.ctxt.gp_regs.spsr;
|
||||
sysreg = vcpu->arch.ctxt.sys_regs;
|
||||
|
||||
spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
|
||||
spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
|
||||
spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
|
||||
spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
|
||||
|
||||
sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
|
||||
sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
|
||||
|
||||
if (__fpsimd_enabled())
|
||||
sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
|
||||
|
||||
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
|
||||
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
|
||||
}
|
||||
|
||||
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u64 *spsr, *sysreg;
|
||||
|
||||
if (read_sysreg(hcr_el2) & HCR_RW)
|
||||
return;
|
||||
|
||||
spsr = vcpu->arch.ctxt.gp_regs.spsr;
|
||||
sysreg = vcpu->arch.ctxt.sys_regs;
|
||||
|
||||
write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
|
||||
write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
|
||||
write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
|
||||
write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
|
||||
|
||||
write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
|
||||
write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
|
||||
|
||||
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
|
||||
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
|
||||
}
|
arch/arm64/kvm/hyp/timer-sr.c (new file, 71 lines)
|
@ -0,0 +1,71 @@
|
|||
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kvm_mmu.h>

#include "hyp.h"

/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	if (kvm->arch.timer.enabled) {
		timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
		timer->cntv_cval = read_sysreg(cntv_cval_el0);
	}

	/* Disable the virtual timer */
	write_sysreg(0, cntv_ctl_el0);

	/* Allow physical timer/counter access for the host */
	val = read_sysreg(cnthctl_el2);
	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	write_sysreg(val, cnthctl_el2);

	/* Clear cntvoff for the host */
	write_sysreg(0, cntvoff_el2);
}

void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	/*
	 * Disallow physical timer access for the guest
	 * Physical counter access is allowed
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~CNTHCTL_EL1PCEN;
	val |= CNTHCTL_EL1PCTEN;
	write_sysreg(val, cnthctl_el2);

	if (kvm->arch.timer.enabled) {
		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
		write_sysreg(timer->cntv_cval, cntv_cval_el0);
		isb();
		write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
	}
}
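
The save/restore pair only toggles two CNTHCTL_EL2 bits: on guest entry the physical counter stays readable while the physical timer is trapped, and on exit both are handed back to the host. A minimal sketch of the resulting values, using the bit definitions this commit adds to include/clocksource/arm_arch_timer.h:

#include <stdio.h>

/* Bit definitions mirror the ones added to arm_arch_timer.h in this commit. */
#define CNTHCTL_EL1PCTEN	(1 << 0)	/* EL1/EL0 physical counter access */
#define CNTHCTL_EL1PCEN		(1 << 1)	/* EL1/EL0 physical timer access */

int main(void)
{
	unsigned int cnthctl = CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; /* host view */

	/* Guest entry (__timer_restore_state): counter reads allowed,
	 * physical timer programming trapped. */
	cnthctl &= ~CNTHCTL_EL1PCEN;
	cnthctl |= CNTHCTL_EL1PCTEN;
	printf("guest cnthctl_el2 = %#x\n", cnthctl);	/* prints 0x1 */

	/* Guest exit (__timer_save_state): give both back to the host. */
	cnthctl |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	printf("host  cnthctl_el2 = %#x\n", cnthctl);	/* prints 0x3 */
	return 0;
}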
|
arch/arm64/kvm/hyp/tlb.c (new file, 80 lines)
|
@ -0,0 +1,80 @@
|
|||
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "hyp.h"

static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	asm volatile("tlbi vmalle1is" : : );
	dsb(ish);
	isb();

	write_sysreg(0, vttbr_el2);
}
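
On the kernel side this hyp function is reached through the __kvm_tlb_flush_vmid_ipa alias below and the kvm_call_hyp() helper, which issues an HVC into the hyp text. A hedged sketch of the call shape; the trampoline and the kvm type are stubbed so the snippet stands alone, and stage2_flush_one() is an illustrative name rather than the real caller.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
struct kvm { int dummy; };

/* Stand-in for the hyp-side flush; in the kernel this is the aliased
 * __hyp_text function above. */
static void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	(void)kvm;
	printf("flush stage-2 TLB entry for IPA %#llx\n", (unsigned long long)ipa);
}

/* Stand-in for the HVC trampoline used by KVM/ARM. */
#define kvm_call_hyp(fn, ...)	fn(__VA_ARGS__)

/* Roughly how the stage-2 MMU code invalidates a single mapping. */
static void stage2_flush_one(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

int main(void)
{
	struct kvm k;

	stage2_flush_one(&k, 0x80001000);
	return 0;
}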

__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
							     phys_addr_t ipa);

static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	isb();

	asm volatile("tlbi vmalls12e1is" : : );
	dsb(ish);
	isb();

	write_sysreg(0, vttbr_el2);
}

__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);

static void __hyp_text __tlb_flush_vm_context(void)
{
	dsb(ishst);
	asm volatile("tlbi alle1is	\n"
		     "ic ialluis	  ": : );
	dsb(ish);
}

__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
|
arch/arm64/kvm/hyp/vgic-v2-sr.c (new file, 84 lines)
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Copyright (C) 2012-2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#include "hyp.h"
|
||||
|
||||
/* vcpu is already in the HYP VA space */
|
||||
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
|
||||
struct vgic_dist *vgic = &kvm->arch.vgic;
|
||||
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
|
||||
u32 eisr0, eisr1, elrsr0, elrsr1;
|
||||
int i, nr_lr;
|
||||
|
||||
if (!base)
|
||||
return;
|
||||
|
||||
nr_lr = vcpu->arch.vgic_cpu.nr_lr;
|
||||
cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
|
||||
cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
|
||||
eisr0 = readl_relaxed(base + GICH_EISR0);
|
||||
elrsr0 = readl_relaxed(base + GICH_ELRSR0);
|
||||
if (unlikely(nr_lr > 32)) {
|
||||
eisr1 = readl_relaxed(base + GICH_EISR1);
|
||||
elrsr1 = readl_relaxed(base + GICH_ELRSR1);
|
||||
} else {
|
||||
eisr1 = elrsr1 = 0;
|
||||
}
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
|
||||
cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
|
||||
#else
|
||||
cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
|
||||
cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
|
||||
#endif
|
||||
cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
|
||||
|
||||
writel_relaxed(0, base + GICH_HCR);
|
||||
|
||||
for (i = 0; i < nr_lr; i++)
|
||||
cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
|
||||
}
|
||||
|
||||
/* vcpu is already in the HYP VA space */
|
||||
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
|
||||
struct vgic_dist *vgic = &kvm->arch.vgic;
|
||||
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
|
||||
int i, nr_lr;
|
||||
|
||||
if (!base)
|
||||
return;
|
||||
|
||||
writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
|
||||
writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
|
||||
writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
|
||||
|
||||
nr_lr = vcpu->arch.vgic_cpu.nr_lr;
|
||||
for (i = 0; i < nr_lr; i++)
|
||||
writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
|
||||
}
|
arch/arm64/kvm/hyp/vgic-v3-sr.c (new file, 228 lines)
|
@ -0,0 +1,228 @@
|
|||
/*
|
||||
* Copyright (C) 2012-2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
#include "hyp.h"
|
||||
|
||||
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
|
||||
#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
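
These two helpers just unpack ICH_VTR_EL2 fields; a quick standalone check of what they return for an example register value (0x90000003 is an arbitrary illustrative value, not taken from real hardware):

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pri_bits(v)		(((u32)(v) >> 29) + 1)

int main(void)
{
	uint64_t vtr = 0x90000003;	/* illustrative ICH_VTR_EL2 value */

	assert(vtr_to_max_lr_idx(vtr) == 3);	/* four list registers: LR0-LR3 */
	assert(vtr_to_nr_pri_bits(vtr) == 5);	/* five priority bits */
	return 0;
}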
|
||||
|
||||
#define read_gicreg(r) \
|
||||
({ \
|
||||
u64 reg; \
|
||||
asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
|
||||
reg; \
|
||||
})
|
||||
|
||||
#define write_gicreg(v,r) \
|
||||
do { \
|
||||
u64 __val = (v); \
|
||||
asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
|
||||
} while (0)
|
||||
|
||||
/* vcpu is already in the HYP VA space */
|
||||
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
|
||||
u64 val;
|
||||
u32 max_lr_idx, nr_pri_bits;
|
||||
|
||||
/*
|
||||
* Make sure stores to the GIC via the memory mapped interface
|
||||
* are now visible to the system register interface.
|
||||
*/
|
||||
dsb(st);
|
||||
|
||||
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
|
||||
cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
|
||||
cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
|
||||
cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
|
||||
|
||||
write_gicreg(0, ICH_HCR_EL2);
|
||||
val = read_gicreg(ICH_VTR_EL2);
|
||||
max_lr_idx = vtr_to_max_lr_idx(val);
|
||||
nr_pri_bits = vtr_to_nr_pri_bits(val);
|
||||
|
||||
switch (max_lr_idx) {
|
||||
case 15:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
|
||||
case 14:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
|
||||
case 13:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
|
||||
case 12:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
|
||||
case 11:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
|
||||
case 10:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
|
||||
case 9:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
|
||||
case 8:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
|
||||
case 7:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
|
||||
case 6:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
|
||||
case 5:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
|
||||
case 4:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
|
||||
case 3:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
|
||||
case 2:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
|
||||
case 1:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
|
||||
case 0:
|
||||
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
|
||||
}
|
||||
|
||||
switch (nr_pri_bits) {
|
||||
case 7:
|
||||
cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
|
||||
cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
|
||||
case 6:
|
||||
cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
|
||||
default:
|
||||
cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
|
||||
}
|
||||
|
||||
switch (nr_pri_bits) {
|
||||
case 7:
|
||||
cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
|
||||
cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
|
||||
case 6:
|
||||
cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
|
||||
default:
|
||||
cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
|
||||
}
|
||||
|
||||
val = read_gicreg(ICC_SRE_EL2);
|
||||
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
|
||||
isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
|
||||
write_gicreg(1, ICC_SRE_EL1);
|
||||
}
|
||||
|
||||
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
|
||||
u64 val;
|
||||
u32 max_lr_idx, nr_pri_bits;
|
||||
|
||||
/*
|
||||
* VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
|
||||
* Group0 interrupt (as generated in GICv2 mode) to be
|
||||
* delivered as a FIQ to the guest, with potentially fatal
|
||||
* consequences. So we must make sure that ICC_SRE_EL1 has
|
||||
* been actually programmed with the value we want before
|
||||
* starting to mess with the rest of the GIC.
|
||||
*/
|
||||
write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
|
||||
isb();
|
||||
|
||||
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
|
||||
write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
|
||||
|
||||
val = read_gicreg(ICH_VTR_EL2);
|
||||
max_lr_idx = vtr_to_max_lr_idx(val);
|
||||
nr_pri_bits = vtr_to_nr_pri_bits(val);
|
||||
|
||||
switch (nr_pri_bits) {
|
||||
case 7:
|
||||
write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
|
||||
write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
|
||||
case 6:
|
||||
write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
|
||||
default:
|
||||
write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
|
||||
}
|
||||
|
||||
switch (nr_pri_bits) {
|
||||
case 7:
|
||||
write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
|
||||
write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
|
||||
case 6:
|
||||
write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
|
||||
default:
|
||||
write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
|
||||
}
|
||||
|
||||
switch (max_lr_idx) {
|
||||
case 15:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
|
||||
case 14:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
|
||||
case 13:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
|
||||
case 12:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
|
||||
case 11:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
|
||||
case 10:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
|
||||
case 9:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
|
||||
case 8:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
|
||||
case 7:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
|
||||
case 6:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
|
||||
case 5:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
|
||||
case 4:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
|
||||
case 3:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
|
||||
case 2:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
|
||||
case 1:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
|
||||
case 0:
|
||||
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Ensures that the above will have reached the
|
||||
 * (re)distributors. This ensures the guest will read the
|
||||
* correct values from the memory-mapped interface.
|
||||
*/
|
||||
isb();
|
||||
dsb(sy);
|
||||
|
||||
/*
|
||||
* Prevent the guest from touching the GIC system registers if
|
||||
* SRE isn't enabled for GICv3 emulation.
|
||||
*/
|
||||
if (!cpu_if->vgic_sre) {
|
||||
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
|
||||
ICC_SRE_EL2);
|
||||
}
|
||||
}
|
||||
|
||||
static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
|
||||
{
|
||||
return read_gicreg(ICH_VTR_EL2);
|
||||
}
|
||||
|
||||
__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
|
|
@ -29,6 +29,7 @@
|
|||
#include <asm/debug-monitors.h>
|
||||
#include <asm/esr.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_host.h>
|
||||
|
@ -219,7 +220,7 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
|
|||
* All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
|
||||
* hyp.S code switches between host and guest values in future.
|
||||
*/
|
||||
static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
|
||||
static void reg_to_dbg(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
u64 *dbg_reg)
|
||||
{
|
||||
|
@ -234,7 +235,7 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
|
|||
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
|
||||
}
|
||||
|
||||
static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
|
||||
static void dbg_to_reg(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
u64 *dbg_reg)
|
||||
{
|
||||
|
@ -243,7 +244,7 @@ static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
|
|||
p->regval &= 0xffffffffUL;
|
||||
}
|
||||
|
||||
static inline bool trap_bvr(struct kvm_vcpu *vcpu,
|
||||
static bool trap_bvr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
|
@ -279,13 +280,13 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void reset_bvr(struct kvm_vcpu *vcpu,
|
||||
static void reset_bvr(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
|
||||
}
|
||||
|
||||
static inline bool trap_bcr(struct kvm_vcpu *vcpu,
|
||||
static bool trap_bcr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
|
@ -322,13 +323,13 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void reset_bcr(struct kvm_vcpu *vcpu,
|
||||
static void reset_bcr(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
|
||||
}
|
||||
|
||||
static inline bool trap_wvr(struct kvm_vcpu *vcpu,
|
||||
static bool trap_wvr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
|
@ -365,13 +366,13 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void reset_wvr(struct kvm_vcpu *vcpu,
|
||||
static void reset_wvr(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
|
||||
}
|
||||
|
||||
static inline bool trap_wcr(struct kvm_vcpu *vcpu,
|
||||
static bool trap_wcr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
|
@ -407,7 +408,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void reset_wcr(struct kvm_vcpu *vcpu,
|
||||
static void reset_wcr(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
|
||||
|
@ -722,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
|
|||
* system is in.
|
||||
*/
|
||||
|
||||
static inline bool trap_xvr(struct kvm_vcpu *vcpu,
|
||||
static bool trap_xvr(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *rd)
|
||||
{
|
||||
|
|
|
@ -1,134 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2012,2013 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
/*
|
||||
* Save the VGIC CPU state into memory
|
||||
* x0: Register pointing to VCPU struct
|
||||
* Do not corrupt x1!!!
|
||||
*/
|
||||
ENTRY(__save_vgic_v2_state)
|
||||
__save_vgic_v2_state:
|
||||
/* Get VGIC VCTRL base into x2 */
|
||||
ldr x2, [x0, #VCPU_KVM]
|
||||
kern_hyp_va x2
|
||||
ldr x2, [x2, #KVM_VGIC_VCTRL]
|
||||
kern_hyp_va x2
|
||||
cbz x2, 2f // disabled
|
||||
|
||||
/* Compute the address of struct vgic_cpu */
|
||||
add x3, x0, #VCPU_VGIC_CPU
|
||||
|
||||
/* Save all interesting registers */
|
||||
ldr w5, [x2, #GICH_VMCR]
|
||||
ldr w6, [x2, #GICH_MISR]
|
||||
ldr w7, [x2, #GICH_EISR0]
|
||||
ldr w8, [x2, #GICH_EISR1]
|
||||
ldr w9, [x2, #GICH_ELRSR0]
|
||||
ldr w10, [x2, #GICH_ELRSR1]
|
||||
ldr w11, [x2, #GICH_APR]
|
||||
CPU_BE( rev w5, w5 )
|
||||
CPU_BE( rev w6, w6 )
|
||||
CPU_BE( rev w7, w7 )
|
||||
CPU_BE( rev w8, w8 )
|
||||
CPU_BE( rev w9, w9 )
|
||||
CPU_BE( rev w10, w10 )
|
||||
CPU_BE( rev w11, w11 )
|
||||
|
||||
str w5, [x3, #VGIC_V2_CPU_VMCR]
|
||||
str w6, [x3, #VGIC_V2_CPU_MISR]
|
||||
CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
|
||||
CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
|
||||
CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
|
||||
CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
|
||||
CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
|
||||
CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
|
||||
CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
|
||||
CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
|
||||
str w11, [x3, #VGIC_V2_CPU_APR]
|
||||
|
||||
/* Clear GICH_HCR */
|
||||
str wzr, [x2, #GICH_HCR]
|
||||
|
||||
/* Save list registers */
|
||||
add x2, x2, #GICH_LR0
|
||||
ldr w4, [x3, #VGIC_CPU_NR_LR]
|
||||
add x3, x3, #VGIC_V2_CPU_LR
|
||||
1: ldr w5, [x2], #4
|
||||
CPU_BE( rev w5, w5 )
|
||||
str w5, [x3], #4
|
||||
sub w4, w4, #1
|
||||
cbnz w4, 1b
|
||||
2:
|
||||
ret
|
||||
ENDPROC(__save_vgic_v2_state)
|
||||
|
||||
/*
|
||||
* Restore the VGIC CPU state from memory
|
||||
* x0: Register pointing to VCPU struct
|
||||
*/
|
||||
ENTRY(__restore_vgic_v2_state)
|
||||
__restore_vgic_v2_state:
|
||||
/* Get VGIC VCTRL base into x2 */
|
||||
ldr x2, [x0, #VCPU_KVM]
|
||||
kern_hyp_va x2
|
||||
ldr x2, [x2, #KVM_VGIC_VCTRL]
|
||||
kern_hyp_va x2
|
||||
cbz x2, 2f // disabled
|
||||
|
||||
/* Compute the address of struct vgic_cpu */
|
||||
add x3, x0, #VCPU_VGIC_CPU
|
||||
|
||||
/* We only restore a minimal set of registers */
|
||||
ldr w4, [x3, #VGIC_V2_CPU_HCR]
|
||||
ldr w5, [x3, #VGIC_V2_CPU_VMCR]
|
||||
ldr w6, [x3, #VGIC_V2_CPU_APR]
|
||||
CPU_BE( rev w4, w4 )
|
||||
CPU_BE( rev w5, w5 )
|
||||
CPU_BE( rev w6, w6 )
|
||||
|
||||
str w4, [x2, #GICH_HCR]
|
||||
str w5, [x2, #GICH_VMCR]
|
||||
str w6, [x2, #GICH_APR]
|
||||
|
||||
/* Restore list registers */
|
||||
add x2, x2, #GICH_LR0
|
||||
ldr w4, [x3, #VGIC_CPU_NR_LR]
|
||||
add x3, x3, #VGIC_V2_CPU_LR
|
||||
1: ldr w5, [x3], #4
|
||||
CPU_BE( rev w5, w5 )
|
||||
str w5, [x2], #4
|
||||
sub w4, w4, #1
|
||||
cbnz w4, 1b
|
||||
2:
|
||||
ret
|
||||
ENDPROC(__restore_vgic_v2_state)
|
||||
|
||||
.popsection
|
|
@ -1,269 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2012,2013 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
/*
|
||||
* We store LRs in reverse order to let the CPU deal with streaming
|
||||
* access. Use this macro to make it look saner...
|
||||
*/
|
||||
#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
|
||||
|
||||
/*
|
||||
* Save the VGIC CPU state into memory
|
||||
* x0: Register pointing to VCPU struct
|
||||
* Do not corrupt x1!!!
|
||||
*/
|
||||
.macro save_vgic_v3_state
|
||||
// Compute the address of struct vgic_cpu
|
||||
add x3, x0, #VCPU_VGIC_CPU
|
||||
|
||||
// Make sure stores to the GIC via the memory mapped interface
|
||||
// are now visible to the system register interface
|
||||
dsb st
|
||||
|
||||
// Save all interesting registers
|
||||
mrs_s x5, ICH_VMCR_EL2
|
||||
mrs_s x6, ICH_MISR_EL2
|
||||
mrs_s x7, ICH_EISR_EL2
|
||||
mrs_s x8, ICH_ELSR_EL2
|
||||
|
||||
str w5, [x3, #VGIC_V3_CPU_VMCR]
|
||||
str w6, [x3, #VGIC_V3_CPU_MISR]
|
||||
str w7, [x3, #VGIC_V3_CPU_EISR]
|
||||
str w8, [x3, #VGIC_V3_CPU_ELRSR]
|
||||
|
||||
msr_s ICH_HCR_EL2, xzr
|
||||
|
||||
mrs_s x21, ICH_VTR_EL2
|
||||
mvn w22, w21
|
||||
ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
|
||||
|
||||
adr x24, 1f
|
||||
add x24, x24, x23
|
||||
br x24
|
||||
|
||||
1:
|
||||
mrs_s x20, ICH_LR15_EL2
|
||||
mrs_s x19, ICH_LR14_EL2
|
||||
mrs_s x18, ICH_LR13_EL2
|
||||
mrs_s x17, ICH_LR12_EL2
|
||||
mrs_s x16, ICH_LR11_EL2
|
||||
mrs_s x15, ICH_LR10_EL2
|
||||
mrs_s x14, ICH_LR9_EL2
|
||||
mrs_s x13, ICH_LR8_EL2
|
||||
mrs_s x12, ICH_LR7_EL2
|
||||
mrs_s x11, ICH_LR6_EL2
|
||||
mrs_s x10, ICH_LR5_EL2
|
||||
mrs_s x9, ICH_LR4_EL2
|
||||
mrs_s x8, ICH_LR3_EL2
|
||||
mrs_s x7, ICH_LR2_EL2
|
||||
mrs_s x6, ICH_LR1_EL2
|
||||
mrs_s x5, ICH_LR0_EL2
|
||||
|
||||
adr x24, 1f
|
||||
add x24, x24, x23
|
||||
br x24
|
||||
|
||||
1:
|
||||
str x20, [x3, #LR_OFFSET(15)]
|
||||
str x19, [x3, #LR_OFFSET(14)]
|
||||
str x18, [x3, #LR_OFFSET(13)]
|
||||
str x17, [x3, #LR_OFFSET(12)]
|
||||
str x16, [x3, #LR_OFFSET(11)]
|
||||
str x15, [x3, #LR_OFFSET(10)]
|
||||
str x14, [x3, #LR_OFFSET(9)]
|
||||
str x13, [x3, #LR_OFFSET(8)]
|
||||
str x12, [x3, #LR_OFFSET(7)]
|
||||
str x11, [x3, #LR_OFFSET(6)]
|
||||
str x10, [x3, #LR_OFFSET(5)]
|
||||
str x9, [x3, #LR_OFFSET(4)]
|
||||
str x8, [x3, #LR_OFFSET(3)]
|
||||
str x7, [x3, #LR_OFFSET(2)]
|
||||
str x6, [x3, #LR_OFFSET(1)]
|
||||
str x5, [x3, #LR_OFFSET(0)]
|
||||
|
||||
tbnz w21, #29, 6f // 6 bits
|
||||
tbz w21, #30, 5f // 5 bits
|
||||
// 7 bits
|
||||
mrs_s x20, ICH_AP0R3_EL2
|
||||
str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
|
||||
mrs_s x19, ICH_AP0R2_EL2
|
||||
str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
|
||||
6: mrs_s x18, ICH_AP0R1_EL2
|
||||
str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
|
||||
5: mrs_s x17, ICH_AP0R0_EL2
|
||||
str w17, [x3, #VGIC_V3_CPU_AP0R]
|
||||
|
||||
tbnz w21, #29, 6f // 6 bits
|
||||
tbz w21, #30, 5f // 5 bits
|
||||
// 7 bits
|
||||
mrs_s x20, ICH_AP1R3_EL2
|
||||
str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
|
||||
mrs_s x19, ICH_AP1R2_EL2
|
||||
str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
|
||||
6: mrs_s x18, ICH_AP1R1_EL2
|
||||
str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
|
||||
5: mrs_s x17, ICH_AP1R0_EL2
|
||||
str w17, [x3, #VGIC_V3_CPU_AP1R]
|
||||
|
||||
// Restore SRE_EL1 access and re-enable SRE at EL1.
|
||||
mrs_s x5, ICC_SRE_EL2
|
||||
orr x5, x5, #ICC_SRE_EL2_ENABLE
|
||||
msr_s ICC_SRE_EL2, x5
|
||||
isb
|
||||
mov x5, #1
|
||||
msr_s ICC_SRE_EL1, x5
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Restore the VGIC CPU state from memory
|
||||
* x0: Register pointing to VCPU struct
|
||||
*/
|
||||
.macro restore_vgic_v3_state
|
||||
// Compute the address of struct vgic_cpu
|
||||
add x3, x0, #VCPU_VGIC_CPU
|
||||
|
||||
// Restore all interesting registers
|
||||
ldr w4, [x3, #VGIC_V3_CPU_HCR]
|
||||
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
|
||||
ldr w25, [x3, #VGIC_V3_CPU_SRE]
|
||||
|
||||
msr_s ICC_SRE_EL1, x25
|
||||
|
||||
// make sure SRE is valid before writing the other registers
|
||||
isb
|
||||
|
||||
msr_s ICH_HCR_EL2, x4
|
||||
msr_s ICH_VMCR_EL2, x5
|
||||
|
||||
mrs_s x21, ICH_VTR_EL2
|
||||
|
||||
tbnz w21, #29, 6f // 6 bits
|
||||
tbz w21, #30, 5f // 5 bits
|
||||
// 7 bits
|
||||
ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
|
||||
msr_s ICH_AP1R3_EL2, x20
|
||||
ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
|
||||
msr_s ICH_AP1R2_EL2, x19
|
||||
6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
|
||||
msr_s ICH_AP1R1_EL2, x18
|
||||
5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
|
||||
msr_s ICH_AP1R0_EL2, x17
|
||||
|
||||
tbnz w21, #29, 6f // 6 bits
|
||||
tbz w21, #30, 5f // 5 bits
|
||||
// 7 bits
|
||||
ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
|
||||
msr_s ICH_AP0R3_EL2, x20
|
||||
ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
|
||||
msr_s ICH_AP0R2_EL2, x19
|
||||
6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
|
||||
msr_s ICH_AP0R1_EL2, x18
|
||||
5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
|
||||
msr_s ICH_AP0R0_EL2, x17
|
||||
|
||||
and w22, w21, #0xf
|
||||
mvn w22, w21
|
||||
ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
|
||||
|
||||
adr x24, 1f
|
||||
add x24, x24, x23
|
||||
br x24
|
||||
|
||||
1:
|
||||
ldr x20, [x3, #LR_OFFSET(15)]
|
||||
ldr x19, [x3, #LR_OFFSET(14)]
|
||||
ldr x18, [x3, #LR_OFFSET(13)]
|
||||
ldr x17, [x3, #LR_OFFSET(12)]
|
||||
ldr x16, [x3, #LR_OFFSET(11)]
|
||||
ldr x15, [x3, #LR_OFFSET(10)]
|
||||
ldr x14, [x3, #LR_OFFSET(9)]
|
||||
ldr x13, [x3, #LR_OFFSET(8)]
|
||||
ldr x12, [x3, #LR_OFFSET(7)]
|
||||
ldr x11, [x3, #LR_OFFSET(6)]
|
||||
ldr x10, [x3, #LR_OFFSET(5)]
|
||||
ldr x9, [x3, #LR_OFFSET(4)]
|
||||
ldr x8, [x3, #LR_OFFSET(3)]
|
||||
ldr x7, [x3, #LR_OFFSET(2)]
|
||||
ldr x6, [x3, #LR_OFFSET(1)]
|
||||
ldr x5, [x3, #LR_OFFSET(0)]
|
||||
|
||||
adr x24, 1f
|
||||
add x24, x24, x23
|
||||
br x24
|
||||
|
||||
1:
|
||||
msr_s ICH_LR15_EL2, x20
|
||||
msr_s ICH_LR14_EL2, x19
|
||||
msr_s ICH_LR13_EL2, x18
|
||||
msr_s ICH_LR12_EL2, x17
|
||||
msr_s ICH_LR11_EL2, x16
|
||||
msr_s ICH_LR10_EL2, x15
|
||||
msr_s ICH_LR9_EL2, x14
|
||||
msr_s ICH_LR8_EL2, x13
|
||||
msr_s ICH_LR7_EL2, x12
|
||||
msr_s ICH_LR6_EL2, x11
|
||||
msr_s ICH_LR5_EL2, x10
|
||||
msr_s ICH_LR4_EL2, x9
|
||||
msr_s ICH_LR3_EL2, x8
|
||||
msr_s ICH_LR2_EL2, x7
|
||||
msr_s ICH_LR1_EL2, x6
|
||||
msr_s ICH_LR0_EL2, x5
|
||||
|
||||
// Ensure that the above will have reached the
|
||||
// (re)distributors. This ensure the guest will read
|
||||
// the correct values from the memory-mapped interface.
|
||||
isb
|
||||
dsb sy
|
||||
|
||||
// Prevent the guest from touching the GIC system registers
|
||||
// if SRE isn't enabled for GICv3 emulation
|
||||
cbnz x25, 1f
|
||||
mrs_s x5, ICC_SRE_EL2
|
||||
and x5, x5, #~ICC_SRE_EL2_ENABLE
|
||||
msr_s ICC_SRE_EL2, x5
|
||||
1:
|
||||
.endm
|
||||
|
||||
ENTRY(__save_vgic_v3_state)
|
||||
save_vgic_v3_state
|
||||
ret
|
||||
ENDPROC(__save_vgic_v3_state)
|
||||
|
||||
ENTRY(__restore_vgic_v3_state)
|
||||
restore_vgic_v3_state
|
||||
ret
|
||||
ENDPROC(__restore_vgic_v3_state)
|
||||
|
||||
ENTRY(__vgic_v3_get_ich_vtr_el2)
|
||||
mrs_s x0, ICH_VTR_EL2
|
||||
ret
|
||||
ENDPROC(__vgic_v3_get_ich_vtr_el2)
|
||||
|
||||
.popsection
|
|
@ -24,8 +24,6 @@
|
|||
#include <asm/cpufeature.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#include "proc-macros.S"
|
||||
|
||||
/*
|
||||
* flush_icache_range(start,end)
|
||||
*
|
||||
|
|
|
@ -23,13 +23,11 @@
|
|||
#include <asm/assembler.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgtable-hwdef.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#include "proc-macros.S"
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
|
||||
#elif defined(CONFIG_ARM64_16K_PAGES)
|
||||
|
@ -66,62 +64,50 @@ ENTRY(cpu_do_suspend)
|
|||
mrs x2, tpidr_el0
|
||||
mrs x3, tpidrro_el0
|
||||
mrs x4, contextidr_el1
|
||||
mrs x5, mair_el1
|
||||
mrs x6, cpacr_el1
|
||||
mrs x7, ttbr1_el1
|
||||
mrs x8, tcr_el1
|
||||
mrs x9, vbar_el1
|
||||
mrs x10, mdscr_el1
|
||||
mrs x11, oslsr_el1
|
||||
mrs x12, sctlr_el1
|
||||
mrs x5, cpacr_el1
|
||||
mrs x6, tcr_el1
|
||||
mrs x7, vbar_el1
|
||||
mrs x8, mdscr_el1
|
||||
mrs x9, oslsr_el1
|
||||
mrs x10, sctlr_el1
|
||||
stp x2, x3, [x0]
|
||||
stp x4, x5, [x0, #16]
|
||||
stp x6, x7, [x0, #32]
|
||||
stp x8, x9, [x0, #48]
|
||||
stp x10, x11, [x0, #64]
|
||||
str x12, [x0, #80]
|
||||
stp x4, xzr, [x0, #16]
|
||||
stp x5, x6, [x0, #32]
|
||||
stp x7, x8, [x0, #48]
|
||||
stp x9, x10, [x0, #64]
|
||||
ret
|
||||
ENDPROC(cpu_do_suspend)
|
||||
|
||||
/**
|
||||
* cpu_do_resume - restore CPU register context
|
||||
*
|
||||
* x0: Physical address of context pointer
|
||||
* x1: ttbr0_el1 to be restored
|
||||
*
|
||||
* Returns:
|
||||
* sctlr_el1 value in x0
|
||||
* x0: Address of context pointer
|
||||
*/
|
||||
ENTRY(cpu_do_resume)
|
||||
/*
|
||||
* Invalidate local tlb entries before turning on MMU
|
||||
*/
|
||||
tlbi vmalle1
|
||||
ldp x2, x3, [x0]
|
||||
ldp x4, x5, [x0, #16]
|
||||
ldp x6, x7, [x0, #32]
|
||||
ldp x8, x9, [x0, #48]
|
||||
ldp x10, x11, [x0, #64]
|
||||
ldr x12, [x0, #80]
|
||||
ldp x6, x8, [x0, #32]
|
||||
ldp x9, x10, [x0, #48]
|
||||
ldp x11, x12, [x0, #64]
|
||||
msr tpidr_el0, x2
|
||||
msr tpidrro_el0, x3
|
||||
msr contextidr_el1, x4
|
||||
msr mair_el1, x5
|
||||
msr cpacr_el1, x6
|
||||
msr ttbr0_el1, x1
|
||||
msr ttbr1_el1, x7
|
||||
tcr_set_idmap_t0sz x8, x7
|
||||
|
||||
/* Don't change t0sz here, mask those bits when restoring */
|
||||
mrs x5, tcr_el1
|
||||
bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
|
||||
|
||||
msr tcr_el1, x8
|
||||
msr vbar_el1, x9
|
||||
msr mdscr_el1, x10
|
||||
msr sctlr_el1, x12
|
||||
/*
|
||||
* Restore oslsr_el1 by writing oslar_el1
|
||||
*/
|
||||
ubfx x11, x11, #1, #1
|
||||
msr oslar_el1, x11
|
||||
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
|
||||
mov x0, x12
|
||||
dsb nsh // Make sure local tlb invalidation completed
|
||||
isb
|
||||
ret
|
||||
ENDPROC(cpu_do_resume)
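
The stp/ldp offsets in cpu_do_suspend/cpu_do_resume above imply the following saved-context layout. This is a hedged sketch only: the authoritative definition is struct cpu_suspend_ctx in arch/arm64/include/asm/suspend.h, and the slot at offset 24 is simply written as zero now that mair_el1 is no longer part of the saved state.

#include <stdint.h>

/* Sketch of the layout implied by the store offsets; names mirror the
 * system register saved at each offset. */
struct suspend_ctx_sketch {
	uint64_t tpidr_el0;		/* [x0, #0]  */
	uint64_t tpidrro_el0;		/* [x0, #8]  */
	uint64_t contextidr_el1;	/* [x0, #16] */
	uint64_t unused;		/* [x0, #24], stored as xzr */
	uint64_t cpacr_el1;		/* [x0, #32] */
	uint64_t tcr_el1;		/* [x0, #40] */
	uint64_t vbar_el1;		/* [x0, #48] */
	uint64_t mdscr_el1;		/* [x0, #56] */
	uint64_t oslsr_el1;		/* [x0, #64] */
	uint64_t sctlr_el1;		/* [x0, #72] */
};

_Static_assert(sizeof(struct suspend_ctx_sketch) == 80,
	       "ten 64-bit slots: highest offset used (72) plus 8");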
|
||||
|
|
|
@ -173,6 +173,9 @@ config QCOM_SCM_64
|
|||
def_bool y
|
||||
depends on QCOM_SCM && ARM64
|
||||
|
||||
config HAVE_ARM_SMCCC
|
||||
bool
|
||||
|
||||
source "drivers/firmware/broadcom/Kconfig"
|
||||
source "drivers/firmware/google/Kconfig"
|
||||
source "drivers/firmware/efi/Kconfig"
|
||||
|
|
|
@ -13,6 +13,8 @@
|
|||
|
||||
#define pr_fmt(fmt) "psci: " fmt
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/of.h>
|
||||
|
@ -20,10 +22,12 @@
|
|||
#include <linux/printk.h>
|
||||
#include <linux/psci.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/suspend.h>
|
||||
|
||||
#include <uapi/linux/psci.h>
|
||||
|
||||
#include <asm/cpuidle.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
@ -58,8 +62,6 @@ struct psci_operations psci_ops;
|
|||
|
||||
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
|
||||
unsigned long, unsigned long);
|
||||
asmlinkage psci_fn __invoke_psci_fn_hvc;
|
||||
asmlinkage psci_fn __invoke_psci_fn_smc;
|
||||
static psci_fn *invoke_psci_fn;
|
||||
|
||||
enum psci_function {
|
||||
|
@ -107,6 +109,26 @@ bool psci_power_state_is_valid(u32 state)
|
|||
return !(state & ~valid_mask);
|
||||
}
|
||||
|
||||
static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
	return res.a0;
}

static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
	return res.a0;
}
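
Once invoke_psci_fn points at one of the two wrappers above, every PSCI call is just a function ID plus up to three arguments. A hedged sketch of that call shape; the SMCCC conduit is stubbed out so the example compiles on its own, and PSCI_0_2_FN_PSCI_VERSION is the standard 0x84000000 function ID.

#include <stdio.h>

/* Stub conduit standing in for __invoke_psci_fn_smc()/_hvc(). */
static unsigned long stub_conduit(unsigned long fn, unsigned long a0,
				  unsigned long a1, unsigned long a2)
{
	(void)a0; (void)a1; (void)a2;
	return fn == 0x84000000UL ? 0x00010000UL : (unsigned long)-1; /* "v1.0" */
}

static unsigned long (*invoke_psci_fn)(unsigned long, unsigned long,
				       unsigned long, unsigned long) = stub_conduit;

#define PSCI_0_2_FN_PSCI_VERSION	0x84000000UL

int main(void)
{
	unsigned long ver = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);

	/* Major version in bits [31:16], minor in bits [15:0]. */
	printf("PSCI version %lu.%lu\n", ver >> 16, ver & 0xffff);
	return 0;
}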
|
||||
|
||||
static int psci_to_linux_errno(int errno)
|
||||
{
|
||||
switch (errno) {
|
||||
|
@ -225,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
|
|||
psci_func_id, 0, 0);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
|
||||
|
||||
static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
|
||||
{
|
||||
int i, ret, count = 0;
|
||||
u32 *psci_states;
|
||||
struct device_node *state_node;
|
||||
|
||||
/*
|
||||
* If the PSCI cpu_suspend function hook has not been initialized
|
||||
* idle states must not be enabled, so bail out
|
||||
*/
|
||||
if (!psci_ops.cpu_suspend)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Count idle states */
|
||||
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
|
||||
count))) {
|
||||
count++;
|
||||
of_node_put(state_node);
|
||||
}
|
||||
|
||||
if (!count)
|
||||
return -ENODEV;
|
||||
|
||||
psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
|
||||
if (!psci_states)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
u32 state;
|
||||
|
||||
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
|
||||
|
||||
ret = of_property_read_u32(state_node,
|
||||
"arm,psci-suspend-param",
|
||||
&state);
|
||||
if (ret) {
|
||||
pr_warn(" * %s missing arm,psci-suspend-param property\n",
|
||||
state_node->full_name);
|
||||
of_node_put(state_node);
|
||||
goto free_mem;
|
||||
}
|
||||
|
||||
of_node_put(state_node);
|
||||
pr_debug("psci-power-state %#x index %d\n", state, i);
|
||||
if (!psci_power_state_is_valid(state)) {
|
||||
pr_warn("Invalid PSCI power state %#x\n", state);
|
||||
ret = -EINVAL;
|
||||
goto free_mem;
|
||||
}
|
||||
psci_states[i] = state;
|
||||
}
|
||||
/* Idle states parsed correctly, initialize per-cpu pointer */
|
||||
per_cpu(psci_power_state, cpu) = psci_states;
|
||||
return 0;
|
||||
|
||||
free_mem:
|
||||
kfree(psci_states);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int psci_cpu_init_idle(unsigned int cpu)
|
||||
{
|
||||
struct device_node *cpu_node;
|
||||
int ret;
|
||||
|
||||
cpu_node = of_get_cpu_node(cpu, NULL);
|
||||
if (!cpu_node)
|
||||
return -ENODEV;
|
||||
|
||||
ret = psci_dt_cpu_init_idle(cpu_node, cpu);
|
||||
|
||||
of_node_put(cpu_node);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int psci_suspend_finisher(unsigned long index)
|
||||
{
|
||||
u32 *state = __this_cpu_read(psci_power_state);
|
||||
|
||||
return psci_ops.cpu_suspend(state[index - 1],
|
||||
virt_to_phys(cpu_resume));
|
||||
}
|
||||
|
||||
int psci_cpu_suspend_enter(unsigned long index)
|
||||
{
|
||||
int ret;
|
||||
u32 *state = __this_cpu_read(psci_power_state);
|
||||
/*
|
||||
* idle state index 0 corresponds to wfi, should never be called
|
||||
* from the cpu_suspend operations
|
||||
*/
|
||||
if (WARN_ON_ONCE(!index))
|
||||
return -EINVAL;
|
||||
|
||||
if (!psci_power_state_loses_context(state[index - 1]))
|
||||
ret = psci_ops.cpu_suspend(state[index - 1], 0);
|
||||
else
|
||||
ret = cpu_suspend(index, psci_suspend_finisher);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* ARM specific CPU idle operations */
|
||||
#ifdef CONFIG_ARM
|
||||
static struct cpuidle_ops psci_cpuidle_ops __initdata = {
|
||||
.suspend = psci_cpu_suspend_enter,
|
||||
.init = psci_dt_cpu_init_idle,
|
||||
};
|
||||
|
||||
CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static int psci_system_suspend(unsigned long unused)
|
||||
{
|
||||
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
|
||||
|
|
|
@ -23,6 +23,12 @@
|
|||
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
|
||||
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
|
||||
|
||||
#define CNTHCTL_EL1PCTEN (1 << 0)
|
||||
#define CNTHCTL_EL1PCEN (1 << 1)
|
||||
#define CNTHCTL_EVNTEN (1 << 2)
|
||||
#define CNTHCTL_EVNTDIR (1 << 3)
|
||||
#define CNTHCTL_EVNTI (0xF << 4)
|
||||
|
||||
enum arch_timer_reg {
|
||||
ARCH_TIMER_REG_CTRL,
|
||||
ARCH_TIMER_REG_TVAL,
|
||||
|
|
|
@ -279,6 +279,12 @@ struct vgic_v2_cpu_if {
|
|||
u32 vgic_lr[VGIC_V2_MAX_LRS];
|
||||
};
|
||||
|
||||
/*
 * LRs are stored in reverse order in memory. Make sure we index them
 * correctly.
 */
#define VGIC_V3_LR_INDEX(lr)		(VGIC_V3_MAX_LRS - 1 - lr)
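
A quick illustration of the reverse indexing, assuming the usual VGIC_V3_MAX_LRS value of 16: the hyp code stores LR15 first, so logical LR0 lands in the last array slot.

#include <assert.h>

#define VGIC_V3_MAX_LRS		16	/* assumed value for this sketch */
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)

int main(void)
{
	assert(VGIC_V3_LR_INDEX(0)  == 15);	/* logical LR0  -> last slot  */
	assert(VGIC_V3_LR_INDEX(15) == 0);	/* logical LR15 -> first slot */
	return 0;
}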
|
||||
|
||||
struct vgic_v3_cpu_if {
|
||||
#ifdef CONFIG_KVM_ARM_VGIC_V3
|
||||
u32 vgic_hcr;
|
||||
|
|
include/linux/arm-smccc.h (new file, 104 lines)
|
@ -0,0 +1,104 @@
/*
* Copyright (c) 2015, Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H

#include <linux/linkage.h>
#include <linux/types.h>

/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
* http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
*/

#define ARM_SMCCC_STD_CALL 0
#define ARM_SMCCC_FAST_CALL 1
#define ARM_SMCCC_TYPE_SHIFT 31

#define ARM_SMCCC_SMC_32 0
#define ARM_SMCCC_SMC_64 1
#define ARM_SMCCC_CALL_CONV_SHIFT 30

#define ARM_SMCCC_OWNER_MASK 0x3F
#define ARM_SMCCC_OWNER_SHIFT 24

#define ARM_SMCCC_FUNC_MASK 0xFFFF

#define ARM_SMCCC_IS_FAST_CALL(smc_val) \
((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
#define ARM_SMCCC_IS_64(smc_val) \
((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK)
#define ARM_SMCCC_OWNER_NUM(smc_val) \
(((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)

#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
(((type) << ARM_SMCCC_TYPE_SHIFT) | \
((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
(((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
((func_num) & ARM_SMCCC_FUNC_MASK))

#define ARM_SMCCC_OWNER_ARCH 0
#define ARM_SMCCC_OWNER_CPU 1
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63

/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
*/
struct arm_smccc_res {
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
};

/**
* arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
*
* This function is used to make SMC calls following SMC Calling Convention.
* The content of the supplied param are copied to registers 0 to 7 prior
* to the SMC instruction. The return values are updated with the content
* from register 0 to 3 on return from the SMC instruction.
*/
asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res);

/**
* arm_smccc_hvc() - make HVC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
*
* This function is used to make HVC calls following SMC Calling
* Convention. The content of the supplied param are copied to registers 0
* to 7 prior to the HVC instruction. The return values are updated with
* the content from register 0 to 3 on return from the HVC instruction.
*/
asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res);

#endif /*__LINUX_ARM_SMCCC_H*/
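As a usage illustration for the new header (not part of the patch): function identifiers are built with ARM_SMCCC_CALL_VAL() and passed in register 0, with results returned through struct arm_smccc_res. The sketch below composes the SMC32 fast-call ID for standard-service function 0, which works out to 0x84000000 (the PSCI_VERSION function ID), and shows how a caller would issue it, assuming the arm_smccc_smc() backend declared above is available; the names prefixed "example" are hypothetical.

#include <linux/arm-smccc.h>

/* 0x84000000: fast call, SMC32 convention, standard services owner, function 0. */
#define EXAMPLE_FN_ID   ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
                                           ARM_SMCCC_SMC_32, \
                                           ARM_SMCCC_OWNER_STANDARD, 0)

static unsigned long example_smccc_call(void)
{
        struct arm_smccc_res res;

        /* a0 carries the function ID; unused arguments are passed as 0. */
        arm_smccc_smc(EXAMPLE_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res);

        return res.a0;  /* result returned by firmware in register 0 */
}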
@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
bool psci_power_state_loses_context(u32 state);
bool psci_power_state_is_valid(u32 state);

int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);

struct psci_operations {
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
@ -280,13 +280,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
}

static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
return -EINVAL;
}
power_attr(pm_wakeup_irq);
power_attr_ro(pm_wakeup_irq);

#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
@ -564,14 +558,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
return show_trace_dev_match(buf, PAGE_SIZE);
}

static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
return -EINVAL;
}

power_attr(pm_trace_dev_match);
power_attr_ro(pm_trace_dev_match);

#endif /* CONFIG_PM_TRACE */

@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = { \
.store = _name##_store, \
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = S_IRUGO, \
}, \
.show = _name##_show, \
}

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
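The power_attr_ro() macro added above is what lets the dummy store handlers removed in the earlier hunks (pm_wakeup_irq_store, pm_trace_dev_match_store) go away: a read-only attribute gets mode S_IRUGO and only a .show callback, so no -EINVAL stub is needed. A minimal sketch of the pattern, with a hypothetical attribute name and placeholder value:

/* Hypothetical read-only attribute following the power_attr_ro() pattern. */
static ssize_t example_stat_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* placeholder value */
}

power_attr_ro(example_stat);    /* declares example_stat_attr: S_IRUGO, .show only */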
@ -36,6 +36,14 @@

#define HIBERNATE_SIG "S1SUSPEND"

/*
* When reading an {un,}compressed image, we may restore pages in place,
* in which case some architectures need these pages cleaning before they
* can be executed. We don't know which pages these may be, so clean the lot.
*/
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
* The swap map is a data structure used for keeping track of each page
* written to a swap partition. It consists of many swap_map_page
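The two flags above gate the cache maintenance added in the hunks that follow: every page read back from the hibernation image is treated as potentially executable, and its instruction cache is brought in sync with the freshly written data. A minimal sketch of that per-page clean, assuming the kernel's flush_icache_range() and page_address() helpers; the wrapper name is hypothetical.

#include <linux/highmem.h>      /* page_address() */
#include <asm/cacheflush.h>     /* flush_icache_range() */

/* Hypothetical helper: make one restored page safe to execute by syncing
 * the I-cache with the data just written over its virtual address range. */
static void example_clean_restored_page(struct page *page)
{
        unsigned long start = (unsigned long)page_address(page);

        flush_icache_range(start, start + PAGE_SIZE);
}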
@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)

if (bio_data_dir(bio) == WRITE)
put_page(page);
else if (clean_pages_on_read)
flush_icache_range((unsigned long)page_address(page),
(unsigned long)page_address(page) + PAGE_SIZE);

if (bio->bi_error && !hb->error)
hb->error = bio->bi_error;
@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,

hib_init_batch(&hb);

clean_pages_on_read = true;
printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
nr_to_read);
m = nr_to_read / 10;
@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
d->unc_len = LZO_UNC_SIZE;
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len);
if (clean_pages_on_decompress)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);

atomic_set(&d->stop, 1);
wake_up(&d->done);
}
@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
memset(crc, 0, offsetof(struct crc_data, go));

clean_pages_on_decompress = true;

/*
* Start the decompression threads.
*/
@ -28,6 +28,7 @@

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

/* These are for GICv2 emulation only */
@ -36,18 +37,12 @@
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)

/*
* LRs are stored in reverse order in memory. make sure we index them
* correctly.
*/
#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)

static u32 ich_vtr_el2;

static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
struct vgic_lr lr_desc;
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)];

if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;

@ -111,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
}

vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val;

if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);