Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

commit 662d618b98
Author: Alex Shi
Date:   2016-11-17 14:58:56 +08:00
14 changed files with 355 additions and 112 deletions

View file

@@ -218,6 +218,15 @@ static inline void __cpu_init_stage2(void)
{
}
static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t phys_idmap_start)
{
/*
* TODO
* kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
*/
}
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -230,7 +239,6 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

View file

@@ -66,6 +66,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

View file

@@ -16,7 +16,6 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -65,6 +64,8 @@ static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
@@ -89,11 +90,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
return &kvm_arm_running_vcpu;
}
int kvm_arch_hardware_enable(void)
{
return 0;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -987,36 +983,91 @@ static void cpu_init_hyp_mode(void *dummy)
kvm_arm_init_debug();
}
static int hyp_init_cpu_notify(struct notifier_block *self,
unsigned long action, void *cpu)
static void cpu_hyp_reinit(void)
{
switch (action) {
case CPU_STARTING:
case CPU_STARTING_FROZEN:
if (is_kernel_in_hyp_mode()) {
/*
* __cpu_init_stage2() is safe to call even if the PM
* event was cancelled before the CPU was reset.
*/
__cpu_init_stage2();
} else {
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
break;
}
return NOTIFY_OK;
}
static struct notifier_block hyp_init_cpu_nb = {
.notifier_call = hyp_init_cpu_notify,
};
static void cpu_hyp_reset(void)
{
phys_addr_t boot_pgd_ptr;
phys_addr_t phys_idmap_start;
if (!is_kernel_in_hyp_mode()) {
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
phys_idmap_start = kvm_get_idmap_start();
__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
}
}
static void _kvm_arch_hardware_enable(void *discard)
{
if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
cpu_hyp_reinit();
__this_cpu_write(kvm_arm_hardware_enabled, 1);
}
}
int kvm_arch_hardware_enable(void)
{
_kvm_arch_hardware_enable(NULL);
return 0;
}
static void _kvm_arch_hardware_disable(void *discard)
{
if (__this_cpu_read(kvm_arm_hardware_enabled)) {
cpu_hyp_reset();
__this_cpu_write(kvm_arm_hardware_enabled, 0);
}
}
void kvm_arch_hardware_disable(void)
{
_kvm_arch_hardware_disable(NULL);
}
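
The per-CPU kvm_arm_hardware_enabled flag is what makes these hooks safe to invoke from any mix of CPU hotplug, PM, and init paths: enable and disable are idempotent, and the PM path deliberately resets EL2 without clearing the flag so that resume knows to bring it back. A minimal single-CPU sketch of the pattern in plain C (the stubs stand in for the real reinit/reset; this is not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

static bool hardware_enabled;          /* stands in for the per-CPU flag */

static void cpu_hyp_reinit_stub(void) { puts("EL2 initialised"); }
static void cpu_hyp_reset_stub(void)  { puts("EL2 reset to stub vectors"); }

static void hardware_enable(void)
{
	if (!hardware_enabled) {       /* idempotent */
		cpu_hyp_reinit_stub();
		hardware_enabled = true;
	}
}

static void hardware_disable(void)
{
	if (hardware_enabled) {
		cpu_hyp_reset_stub();
		hardware_enabled = false;
	}
}

int main(void)
{
	hardware_enable();             /* hotplug/init path enables EL2 */

	cpu_hyp_reset_stub();          /* CPU_PM_ENTER: reset, flag kept */
	if (hardware_enabled)          /* CPU_PM_EXIT: flag says re-enable */
		cpu_hyp_reinit_stub();

	hardware_disable();
	return 0;
}

The CPU_PM_ENTER/CPU_PM_EXIT comments in hyp_init_cpu_pm_notifier below spell out the same asymmetry.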
#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd,
void *v)
{
if (cmd == CPU_PM_EXIT &&
__hyp_get_vectors() == hyp_default_vectors) {
cpu_init_hyp_mode(NULL);
return NOTIFY_OK;
}
/*
* kvm_arm_hardware_enabled is left with its old value over
* PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
* re-enable hyp.
*/
switch (cmd) {
case CPU_PM_ENTER:
if (__this_cpu_read(kvm_arm_hardware_enabled))
/*
* don't update kvm_arm_hardware_enabled here
* so that the hardware will be re-enabled
* when we resume. See below.
*/
cpu_hyp_reset();
return NOTIFY_DONE;
return NOTIFY_OK;
case CPU_PM_EXIT:
if (__this_cpu_read(kvm_arm_hardware_enabled))
/* The hardware was enabled before suspend. */
cpu_hyp_reinit();
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
static struct notifier_block hyp_init_cpu_pm_nb = {
@@ -1033,6 +1084,91 @@ static inline void hyp_cpu_pm_init(void)
}
#endif
static void teardown_common_resources(void)
{
free_percpu(kvm_host_cpu_state);
}
static int init_common_resources(void)
{
kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
if (!kvm_host_cpu_state) {
kvm_err("Cannot allocate host CPU state\n");
return -ENOMEM;
}
return 0;
}
static int init_subsystems(void)
{
int err = 0;
/*
* Enable hardware so that subsystem initialisation can access EL2.
*/
on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
/*
* Register CPU low-power notifier
*/
hyp_cpu_pm_init();
/*
* Init HYP view of VGIC
*/
err = kvm_vgic_hyp_init();
switch (err) {
case 0:
vgic_present = true;
break;
case -ENODEV:
case -ENXIO:
vgic_present = false;
err = 0;
break;
default:
goto out;
}
/*
* Init HYP architected timer support
*/
err = kvm_timer_hyp_init();
if (err)
goto out;
kvm_perf_init();
kvm_coproc_table_init();
out:
on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
return err;
}
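
Note the bracketing here: init_subsystems() enables EL2 on every CPU, probes the VGIC and timer, then unconditionally disables again on both the success and the error path; from then on EL2 is only brought up on demand through kvm_arch_hardware_enable(). A reduced, self-contained sketch of that shape (illustrative C; the probe functions and enable/disable calls are stand-ins, not the kernel API):

#include <errno.h>
#include <stdio.h>

static void all_cpus_enable(void)  { puts("EL2 up on all CPUs"); }
static void all_cpus_disable(void) { puts("EL2 down on all CPUs"); }
static int probe_vgic(void)  { return -ENODEV; }  /* pretend: no GIC */
static int probe_timer(void) { return 0; }

static int init_subsystems_sketch(void)
{
	int err;

	all_cpus_enable();              /* bring EL2 up everywhere */

	err = probe_vgic();
	if (err == -ENODEV || err == -ENXIO)
		err = 0;                /* VGIC is optional: carry on */
	if (err)
		goto out;

	err = probe_timer();
out:
	all_cpus_disable();             /* runs on success *and* failure */
	return err;
}

int main(void) { return init_subsystems_sketch() ? 1 : 0; }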
static void teardown_hyp_mode(void)
{
int cpu;
if (is_kernel_in_hyp_mode())
return;
free_hyp_pgds();
for_each_possible_cpu(cpu)
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
}
static int init_vhe_mode(void)
{
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
kvm_info("VHE mode initialized successfully\n");
return 0;
}
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -1063,7 +1199,7 @@ static int init_hyp_mode(void)
stack_page = __get_free_page(GFP_KERNEL);
if (!stack_page) {
err = -ENOMEM;
goto out_free_stack_pages;
goto out_err;
}
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
@@ -1076,13 +1212,14 @@ static int init_hyp_mode(void)
kvm_ksym_ref(__kvm_hyp_code_end));
if (err) {
kvm_err("Cannot map world-switch code\n");
goto out_free_mappings;
goto out_err;
}
err = create_hyp_mappings(__start_rodata, __end_rodata);
err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
kvm_ksym_ref(__end_rodata));
if (err) {
kvm_err("Cannot map rodata section\n");
goto out_free_mappings;
goto out_err;
}
/*
@@ -1094,20 +1231,10 @@ static int init_hyp_mode(void)
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_free_mappings;
goto out_err;
}
}
/*
* Map the host CPU structures
*/
kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
if (!kvm_host_cpu_state) {
err = -ENOMEM;
kvm_err("Cannot allocate host CPU state\n");
goto out_free_mappings;
}
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
@@ -1116,44 +1243,14 @@ static int init_hyp_mode(void)
if (err) {
kvm_err("Cannot map host CPU state: %d\n", err);
goto out_free_context;
goto out_err;
}
}
/*
* Execute the init code on each CPU.
*/
on_each_cpu(cpu_init_hyp_mode, NULL, 1);
/*
* Init HYP view of VGIC
*/
err = kvm_vgic_hyp_init();
switch (err) {
case 0:
vgic_present = true;
break;
case -ENODEV:
case -ENXIO:
vgic_present = false;
break;
default:
goto out_free_context;
}
/*
* Init HYP architected timer support
*/
err = kvm_timer_hyp_init();
if (err)
goto out_free_context;
#ifndef CONFIG_HOTPLUG_CPU
free_boot_hyp_pgd();
#endif
kvm_perf_init();
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1161,14 +1258,9 @@ static int init_hyp_mode(void)
kvm_info("Hyp mode initialized successfully\n");
return 0;
out_free_context:
free_percpu(kvm_host_cpu_state);
out_free_mappings:
free_hyp_pgds();
out_free_stack_pages:
for_each_possible_cpu(cpu)
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
teardown_hyp_mode();
kvm_err("error initializing Hyp mode: %d\n", err);
return err;
}
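
The collapse of the out_free_context/out_free_mappings/out_free_stack_pages labels into a single out_err works because teardown_hyp_mode() is safe to run against partially initialised state: free_hyp_pgds() and free_page() tolerate resources that were never allocated (free_page() on a zero address is a no-op). The same idea in portable C, with free(NULL) playing that role (purely illustrative):

#include <stdlib.h>

struct ctx {
	void *stack;    /* allocated first */
	void *mapping;  /* allocated second */
};

/* Safe at any point of partial init: free(NULL) is a no-op,
 * much as free_page(0) is in the kernel. */
static void teardown(struct ctx *c)
{
	free(c->mapping);
	free(c->stack);
}

static int init(struct ctx *c)
{
	c->stack = malloc(4096);
	if (!c->stack)
		goto out_err;
	c->mapping = malloc(4096);
	if (!c->mapping)
		goto out_err;   /* one label, however far we got */
	return 0;
out_err:
	teardown(c);
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };   /* zero-init so teardown is always safe */
	if (init(&c) == 0)
		teardown(&c);
	return 0;
}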
@@ -1212,26 +1304,27 @@ int kvm_arch_init(void *opaque)
}
}
cpu_notifier_register_begin();
err = init_common_resources();
if (err)
return err;
err = init_hyp_mode();
if (is_kernel_in_hyp_mode())
err = init_vhe_mode();
else
err = init_hyp_mode();
if (err)
goto out_err;
err = __register_cpu_notifier(&hyp_init_cpu_nb);
if (err) {
kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
goto out_err;
}
err = init_subsystems();
if (err)
goto out_hyp;
cpu_notifier_register_done();
hyp_cpu_pm_init();
kvm_coproc_table_init();
return 0;
out_hyp:
teardown_hyp_mode();
out_err:
cpu_notifier_register_done();
teardown_common_resources();
return err;
}
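
kvm_arch_init() now reads as a ladder: init_common_resources(), then the VHE- or HYP-mode init, then init_subsystems(), with the error labels unwinding in exactly the reverse order and falling through to each other. A skeletal version of the pattern (all names are placeholders, not the kernel functions):

#include <stdio.h>

static int  init_common(void)     { return 0; }
static void teardown_common(void) { }
static int  init_mode(void)       { return 0; }
static void teardown_mode(void)   { }
static int  init_subsys(void)     { return 0; }

static int arch_init_sketch(void)
{
	int err;

	err = init_common();
	if (err)
		return err;     /* nothing to unwind yet */

	err = init_mode();
	if (err)
		goto out_common;

	err = init_subsys();
	if (err)
		goto out_mode;

	return 0;

out_mode:                       /* mirror of init order, newest first */
	teardown_mode();
out_common:
	teardown_common();
	return err;
}

int main(void) { return arch_init_sketch(); }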

View file

@@ -28,6 +28,7 @@
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>
#include "trace.h"
@@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to)
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
if (is_kernel_in_hyp_mode())
return 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
@@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
if (is_kernel_in_hyp_mode())
return 0;
/* Check for a valid kernel IO mapping */
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
@@ -1648,6 +1655,11 @@ phys_addr_t kvm_get_idmap_vector(void)
return hyp_idmap_vector;
}
phys_addr_t kvm_get_idmap_start(void)
{
return hyp_idmap_start;
}
int kvm_mmu_init(void)
{
int err;

View file

@@ -22,6 +22,8 @@
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE 2
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -29,11 +31,27 @@
#define kvm_ksym_ref(sym) phys_to_virt((u64)&sym - kimage_voffset)
#ifndef __ASSEMBLY__
#if __GNUC__ > 4
#define kvm_ksym_shift (PAGE_OFFSET - KIMAGE_VADDR)
#else
/*
* GCC versions 4.9 and older will fold the constant below into the addend of
* the reference to 'sym' above if kvm_ksym_shift is declared static or if the
* constant is used directly. However, since we use the small code model for
* the core kernel, the reference to 'sym' will be emitted as an adrp/add pair,
* with a +/- 4 GB range, resulting in linker relocation errors if the shift
* is sufficiently large. So prevent the compiler from folding the shift into
* the addend, by making the shift a variable with external linkage.
*/
__weak u64 kvm_ksym_shift = PAGE_OFFSET - KIMAGE_VADDR;
#endif
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
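
To see what the kvm_ksym_shift comment above is guarding against: with a compile-time constant, the compiler may fold the addition into the relocation against the symbol itself, and under the small code model an adrp/add pair can only reach +/- 4 GB of the PC; routing the shift through a variable with external linkage keeps the relocation on the symbol alone and moves the (large) addition to run time. A toy illustration with made-up constants (not the kernel macros):

#include <stdio.h>

char sym[16];                            /* stand-in for a kernel symbol */

#define SHIFT_CONST 0x0000400000000000UL /* made-up shift, > 4 GB */

/* With a literal constant the compiler is free to fold the addition
 * into the relocation, i.e. relocate 'sym + SHIFT_CONST' directly;
 * under -mcmodel=small that can exceed the adrp/add +/- 4 GB reach. */
void *ref_foldable(void) { return (void *)sym + SHIFT_CONST; }

/* With an external-linkage variable the compiler must keep the
 * relocation on 'sym' and add the loaded shift at run time. */
unsigned long kvm_ksym_shift_demo = SHIFT_CONST;
void *ref_unfoldable(void) { return (void *)sym + kvm_ksym_shift_demo; }

int main(void)
{
	printf("%p %p\n", ref_foldable(), ref_unfoldable());
	return 0;
}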

View file

@@ -44,6 +44,7 @@
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
phys_addr_t kvm_hyp_reset_entry(void);
struct kvm_arch {
/* The VMID generation used for the virt. memory system */
@@ -330,7 +331,17 @@ static inline void __cpu_init_stage2(void)
{
}
static inline void kvm_arch_hardware_disable(void) {}
static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t phys_idmap_start)
{
/*
* Call reset code, and switch back to stub hyp vectors.
* Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
*/
__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
boot_pgd_ptr, phys_idmap_start);
}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

View file

@@ -99,6 +99,7 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

View file

@@ -18,6 +18,22 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
/*
* The arm64 hcall implementation uses x0 to specify the hcall type. A value
* less than 0xfff indicates a special hcall, such as get/set vector.
* Any other value is used as a pointer to the function to call.
*/
/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
#define HVC_GET_VECTORS 0
/*
* HVC_SET_VECTORS - Set the value of the vbar_el2 register.
*
* @x1: Physical address of the new vector table.
*/
#define HVC_SET_VECTORS 1
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
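
The dispatch the comment block above describes, rendered as C for readability — the real thing is the el1_sync handler in hyp-stub.S in the next file; vbar_el2 here is a plain variable modelling the register, not a real accessor:

#include <stdio.h>

#define HVC_GET_VECTORS        0
#define HVC_SET_VECTORS        1
#define ARM_EXCEPTION_HYP_GONE 2

static unsigned long vbar_el2;  /* models the EL2 vector base register */

/* C rendering of the hyp-stub's el1_sync dispatch on x0 */
static unsigned long hyp_stub_hcall(unsigned long x0, unsigned long x1)
{
	switch (x0) {
	case HVC_GET_VECTORS:
		return vbar_el2;
	case HVC_SET_VECTORS:
		vbar_el2 = x1;
		return 0;
	default:
		/* anything else is a function pointer the stub
		 * cannot run: report that full HYP is gone */
		return ARM_EXCEPTION_HYP_GONE;
	}
}

int main(void)
{
	hyp_stub_hcall(HVC_SET_VECTORS, 0x1000);
	printf("vbar = 0x%lx\n", hyp_stub_hcall(HVC_GET_VECTORS, 0));
	return 0;
}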

View file

@@ -22,6 +22,8 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
@@ -53,15 +55,26 @@ ENDPROC(__hyp_stub_vectors)
.align 11
el1_sync:
mrs x1, esr_el2
lsr x1, x1, #26
cmp x1, #0x16
b.ne 2f // Not an HVC trap
cbz x0, 1f
msr vbar_el2, x0 // Set vbar_el2
b 2f
1: mrs x0, vbar_el2 // Return vbar_el2
2: eret
mrs x30, esr_el2
lsr x30, x30, #ESR_ELx_EC_SHIFT
cmp x30, #ESR_ELx_EC_HVC64
b.ne 9f // Not an HVC trap
cmp x0, #HVC_GET_VECTORS
b.ne 1f
mrs x0, vbar_el2
b 9f
1: cmp x0, #HVC_SET_VECTORS
b.ne 2f
msr vbar_el2, x1
b 9f
/* Someone called kvm_call_hyp() against the hyp-stub... */
2: mov x0, #ARM_EXCEPTION_HYP_GONE
9: eret
ENDPROC(el1_sync)
.macro invalid_vector label
@@ -101,10 +114,18 @@ ENDPROC(\label)
*/
ENTRY(__hyp_get_vectors)
mov x0, xzr
// fall through
ENTRY(__hyp_set_vectors)
str lr, [sp, #-16]!
mov x0, #HVC_GET_VECTORS
hvc #0
ldr lr, [sp], #16
ret
ENDPROC(__hyp_get_vectors)
ENTRY(__hyp_set_vectors)
str lr, [sp, #-16]!
mov x1, x0
mov x0, #HVC_SET_VECTORS
hvc #0
ldr lr, [sp], #16
ret
ENDPROC(__hyp_set_vectors)

View file

@@ -183,6 +183,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
case ARM_EXCEPTION_HYP_GONE:
/*
* EL2 has been reset to the hyp-stub. This happens when a guest
* is pre-empted by kvm_reboot()'s shutdown call.
*/
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
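
From userspace, ARM_EXCEPTION_HYP_GONE therefore surfaces as an ordinary KVM_RUN exit with KVM_EXIT_FAIL_ENTRY. A sketch of how a VMM loop might observe it — vcpu_fd and the mmap'd run structure are assumed to come from the usual KVM_CREATE_VM/KVM_CREATE_VCPU setup, which is omitted:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	switch (run->exit_reason) {
	case KVM_EXIT_FAIL_ENTRY:
		/* EL2 was torn down under us (e.g. a reboot is in
		 * progress); stop running this vcpu. */
		fprintf(stderr, "vcpu entry failed\n");
		return -1;
	default:
		return 0;
	}
}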

View file

@@ -152,6 +152,44 @@ merged:
eret
ENDPROC(__kvm_hyp_init)
/*
* x0: HYP boot pgd
* x1: HYP phys_idmap_start
*/
ENTRY(__kvm_hyp_reset)
/* We're in trampoline code in VA, switch back to boot page tables */
msr ttbr0_el2, x0
isb
/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
ic iallu
tlbi alle2
dsb sy
isb
/* Branch into PA space */
adr x0, 1f
bfi x1, x0, #0, #PAGE_SHIFT
br x1
/* We're now in idmap, disable MMU */
1: mrs x0, sctlr_el2
ldr x1, =SCTLR_ELx_FLAGS
bic x0, x0, x1 // Clear SCTLR_ELx_M, etc.
msr sctlr_el2, x0
isb
/* Invalidate the old TLBs */
tlbi alle2
dsb sy
/* Install stub vectors */
adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
eret
ENDPROC(__kvm_hyp_reset)
.ltorg
.popsection

View file

@@ -35,16 +35,21 @@
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in x0.
*
* A function pointer with a value of 0 has a special meaning, and is
* used to implement __hyp_get_vectors in the same way as in
* A function pointer with a value less than 0xfff has a special meaning,
* and is used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
* HVC behaves as a 'bl' call and will clobber lr.
*/
ENTRY(__kvm_call_hyp)
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
str lr, [sp, #-16]!
hvc #0
ldr lr, [sp], #16
ret
alternative_else
b __vhe_hyp_call
nop
nop
nop
alternative_endif
ENDPROC(__kvm_call_hyp)
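
The calling convention described above — a small command code or a real function pointer in x0, with the remaining arguments shuffled down by do_el2_call — can be modelled in C as follows (a model only; the 0xfff bound and the shuffle mirror the comments, nothing here is the kernel implementation):

#include <stdio.h>

#define HVC_CALL_MAX 0xfff  /* values below this are special hcalls */

typedef unsigned long (*hyp_fn_t)(unsigned long, unsigned long,
				  unsigned long);

/* C model of __kvm_call_hyp: x0 is either a small command code
 * or a pointer to the function to run at EL2. */
static unsigned long call_hyp_model(unsigned long x0, unsigned long x1,
				    unsigned long x2, unsigned long x3)
{
	if (x0 < HVC_CALL_MAX)
		return 0;   /* handled as in the hyp-stub sketch above */
	/* do_el2_call: shuffle x1..x3 down into argument registers */
	return ((hyp_fn_t)x0)(x1, x2, x3);
}

static unsigned long demo_fn(unsigned long a, unsigned long b,
			     unsigned long c)
{
	return a + b + c;
}

int main(void)
{
	printf("%lu\n", call_hyp_model((unsigned long)demo_fn, 1, 2, 3));
	return 0;
}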

View file

@@ -43,19 +43,17 @@
* Shuffle the parameters before calling the function
* pointed to in x0. Assumes parameters in x[1,2,3].
*/
sub sp, sp, #16
str lr, [sp]
mov lr, x0
mov x0, x1
mov x1, x2
mov x2, x3
blr lr
ldr lr, [sp]
add sp, sp, #16
.endm
ENTRY(__vhe_hyp_call)
str lr, [sp, #-16]!
do_el2_call
ldr lr, [sp], #16
/*
* We used to rely on having an exception return to get
* an implicit isb. In the E2H case, we don't have it anymore.
@@ -81,8 +79,8 @@ el1_sync: // Guest trapped into EL2
/* Here, we're pretty sure the host called HVC. */
restore_x0_to_x3
/* Check for __hyp_get_vectors */
cbnz x0, 1f
cmp x0, #HVC_GET_VECTORS
b.ne 1f
mrs x0, vbar_el2
b 2f

View file

@@ -29,7 +29,9 @@
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
/*
* ARMv8 Reset Values
@@ -123,3 +125,15 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
extern char __hyp_idmap_text_start[];
phys_addr_t kvm_hyp_reset_entry(void)
{
unsigned long offset;
offset = (unsigned long)__kvm_hyp_reset
- ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
return TRAMPOLINE_VA + offset;
}
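
The arithmetic here: __kvm_hyp_reset lives in the idmap text section, which is also mapped at TRAMPOLINE_VA, so taking the reset symbol's offset from the page-aligned start of that section and adding it to TRAMPOLINE_VA yields the reset entry as seen through the trampoline mapping. A worked example with invented addresses (4 KB pages assumed):

#include <stdio.h>

int main(void)
{
	/* All values made up for illustration. */
	unsigned long trampoline_va        = 0xffffffbffc000000UL;
	unsigned long hyp_idmap_text_start = 0x0000000040094800UL;
	unsigned long kvm_hyp_reset        = 0x0000000040094a40UL;
	unsigned long page_mask            = ~0xfffUL;

	/* offset of the reset code within its page-aligned section */
	unsigned long offset = kvm_hyp_reset -
			       (hyp_idmap_text_start & page_mask);

	printf("entry = 0x%lx\n", trampoline_va + offset);
	/* -> trampoline_va + 0xa40 */
	return 0;
}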