KVM: Rename vcpu->shadow_efer to efer
None of the other registers have the shadow_ prefix.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent
836a1b3c34
commit
f6801dff23
6 changed files with 23 additions and 23 deletions
|
@@ -277,7 +277,7 @@ struct kvm_vcpu_arch {
|
||||||
unsigned long cr8;
|
unsigned long cr8;
|
||||||
u32 hflags;
|
u32 hflags;
|
||||||
u64 pdptrs[4]; /* pae */
|
u64 pdptrs[4]; /* pae */
|
||||||
u64 shadow_efer;
|
u64 efer;
|
||||||
u64 apic_base;
|
u64 apic_base;
|
||||||
struct kvm_lapic *apic; /* kernel irqchip context */
|
struct kvm_lapic *apic; /* kernel irqchip context */
|
||||||
int32_t apic_arb_prio;
|
int32_t apic_arb_prio;
|
||||||
|
|
|
@@ -237,7 +237,7 @@ static int is_cpuid_PSE36(void)
|
||||||
|
|
||||||
static int is_nx(struct kvm_vcpu *vcpu)
|
static int is_nx(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
return vcpu->arch.shadow_efer & EFER_NX;
|
return vcpu->arch.efer & EFER_NX;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int is_shadow_present_pte(u64 pte)
|
static int is_shadow_present_pte(u64 pte)
|
||||||
|
|
|
@@ -231,7 +231,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||||
efer &= ~EFER_LME;
|
efer &= ~EFER_LME;
|
||||||
|
|
||||||
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
|
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
|
||||||
vcpu->arch.shadow_efer = efer;
|
vcpu->arch.efer = efer;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
|
||||||
|
@@ -996,14 +996,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||||
struct vcpu_svm *svm = to_svm(vcpu);
|
struct vcpu_svm *svm = to_svm(vcpu);
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
if (vcpu->arch.shadow_efer & EFER_LME) {
|
if (vcpu->arch.efer & EFER_LME) {
|
||||||
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
|
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
|
||||||
vcpu->arch.shadow_efer |= EFER_LMA;
|
vcpu->arch.efer |= EFER_LMA;
|
||||||
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
|
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
|
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
|
||||||
vcpu->arch.shadow_efer &= ~EFER_LMA;
|
vcpu->arch.efer &= ~EFER_LMA;
|
||||||
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
|
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1361,7 +1361,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
|
||||||
|
|
||||||
static int nested_svm_check_permissions(struct vcpu_svm *svm)
|
static int nested_svm_check_permissions(struct vcpu_svm *svm)
|
||||||
{
|
{
|
||||||
if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
|
if (!(svm->vcpu.arch.efer & EFER_SVME)
|
||||||
|| !is_paging(&svm->vcpu)) {
|
|| !is_paging(&svm->vcpu)) {
|
||||||
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
|
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
|
||||||
return 1;
|
return 1;
|
||||||
|
@@ -1764,7 +1764,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
|
||||||
hsave->save.ds = vmcb->save.ds;
|
hsave->save.ds = vmcb->save.ds;
|
||||||
hsave->save.gdtr = vmcb->save.gdtr;
|
hsave->save.gdtr = vmcb->save.gdtr;
|
||||||
hsave->save.idtr = vmcb->save.idtr;
|
hsave->save.idtr = vmcb->save.idtr;
|
||||||
hsave->save.efer = svm->vcpu.arch.shadow_efer;
|
hsave->save.efer = svm->vcpu.arch.efer;
|
||||||
hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
|
hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
|
||||||
hsave->save.cr4 = svm->vcpu.arch.cr4;
|
hsave->save.cr4 = svm->vcpu.arch.cr4;
|
||||||
hsave->save.rflags = vmcb->save.rflags;
|
hsave->save.rflags = vmcb->save.rflags;
|
||||||
|
|
|
@@ -613,7 +613,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
|
||||||
u64 guest_efer;
|
u64 guest_efer;
|
||||||
u64 ignore_bits;
|
u64 ignore_bits;
|
||||||
|
|
||||||
guest_efer = vmx->vcpu.arch.shadow_efer;
|
guest_efer = vmx->vcpu.arch.efer;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* NX is emulated; LMA and LME handled by hardware; SCE meaninless
|
* NX is emulated; LMA and LME handled by hardware; SCE meaninless
|
||||||
|
@@ -955,7 +955,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
|
||||||
* if efer.sce is enabled.
|
* if efer.sce is enabled.
|
||||||
*/
|
*/
|
||||||
index = __find_msr_index(vmx, MSR_K6_STAR);
|
index = __find_msr_index(vmx, MSR_K6_STAR);
|
||||||
if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
|
if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
|
||||||
move_msr_up(vmx, index, save_nmsrs++);
|
move_msr_up(vmx, index, save_nmsrs++);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@@ -1600,7 +1600,7 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||||
* of this msr depends on is_long_mode().
|
* of this msr depends on is_long_mode().
|
||||||
*/
|
*/
|
||||||
vmx_load_host_state(to_vmx(vcpu));
|
vmx_load_host_state(to_vmx(vcpu));
|
||||||
vcpu->arch.shadow_efer = efer;
|
vcpu->arch.efer = efer;
|
||||||
if (!msr)
|
if (!msr)
|
||||||
return;
|
return;
|
||||||
if (efer & EFER_LMA) {
|
if (efer & EFER_LMA) {
|
||||||
|
@@ -1632,13 +1632,13 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
|
||||||
(guest_tr_ar & ~AR_TYPE_MASK)
|
(guest_tr_ar & ~AR_TYPE_MASK)
|
||||||
| AR_TYPE_BUSY_64_TSS);
|
| AR_TYPE_BUSY_64_TSS);
|
||||||
}
|
}
|
||||||
vcpu->arch.shadow_efer |= EFER_LMA;
|
vcpu->arch.efer |= EFER_LMA;
|
||||||
vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
|
vmx_set_efer(vcpu, vcpu->arch.efer);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void exit_lmode(struct kvm_vcpu *vcpu)
|
static void exit_lmode(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
vcpu->arch.shadow_efer &= ~EFER_LMA;
|
vcpu->arch.efer &= ~EFER_LMA;
|
||||||
|
|
||||||
vmcs_write32(VM_ENTRY_CONTROLS,
|
vmcs_write32(VM_ENTRY_CONTROLS,
|
||||||
vmcs_read32(VM_ENTRY_CONTROLS)
|
vmcs_read32(VM_ENTRY_CONTROLS)
|
||||||
|
@@ -1745,7 +1745,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||||
enter_rmode(vcpu);
|
enter_rmode(vcpu);
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
if (vcpu->arch.shadow_efer & EFER_LME) {
|
if (vcpu->arch.efer & EFER_LME) {
|
||||||
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
|
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
|
||||||
enter_lmode(vcpu);
|
enter_lmode(vcpu);
|
||||||
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
|
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
|
||||||
|
|
|
@@ -456,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||||
|
|
||||||
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
|
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
if ((vcpu->arch.shadow_efer & EFER_LME)) {
|
if ((vcpu->arch.efer & EFER_LME)) {
|
||||||
int cs_db, cs_l;
|
int cs_db, cs_l;
|
||||||
|
|
||||||
if (!is_pae(vcpu)) {
|
if (!is_pae(vcpu)) {
|
||||||
|
@@ -655,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_paging(vcpu)
|
if (is_paging(vcpu)
|
||||||
&& (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
|
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
|
||||||
printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
|
printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
|
||||||
kvm_inject_gp(vcpu, 0);
|
kvm_inject_gp(vcpu, 0);
|
||||||
return;
|
return;
|
||||||
|
@@ -686,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||||
kvm_x86_ops->set_efer(vcpu, efer);
|
kvm_x86_ops->set_efer(vcpu, efer);
|
||||||
|
|
||||||
efer &= ~EFER_LMA;
|
efer &= ~EFER_LMA;
|
||||||
efer |= vcpu->arch.shadow_efer & EFER_LMA;
|
efer |= vcpu->arch.efer & EFER_LMA;
|
||||||
|
|
||||||
vcpu->arch.shadow_efer = efer;
|
vcpu->arch.efer = efer;
|
||||||
|
|
||||||
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
|
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
|
||||||
kvm_mmu_reset_context(vcpu);
|
kvm_mmu_reset_context(vcpu);
|
||||||
|
@@ -1426,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
|
||||||
data |= (((uint64_t)4ULL) << 40);
|
data |= (((uint64_t)4ULL) << 40);
|
||||||
break;
|
break;
|
||||||
case MSR_EFER:
|
case MSR_EFER:
|
||||||
data = vcpu->arch.shadow_efer;
|
data = vcpu->arch.efer;
|
||||||
break;
|
break;
|
||||||
case MSR_KVM_WALL_CLOCK:
|
case MSR_KVM_WALL_CLOCK:
|
||||||
data = vcpu->kvm->arch.wall_clock;
|
data = vcpu->kvm->arch.wall_clock;
|
||||||
|
@@ -4569,7 +4569,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
|
||||||
sregs->cr3 = vcpu->arch.cr3;
|
sregs->cr3 = vcpu->arch.cr3;
|
||||||
sregs->cr4 = kvm_read_cr4(vcpu);
|
sregs->cr4 = kvm_read_cr4(vcpu);
|
||||||
sregs->cr8 = kvm_get_cr8(vcpu);
|
sregs->cr8 = kvm_get_cr8(vcpu);
|
||||||
sregs->efer = vcpu->arch.shadow_efer;
|
sregs->efer = vcpu->arch.efer;
|
||||||
sregs->apic_base = kvm_get_apic_base(vcpu);
|
sregs->apic_base = kvm_get_apic_base(vcpu);
|
||||||
|
|
||||||
memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
|
memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
|
||||||
|
@@ -5059,7 +5059,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
|
||||||
|
|
||||||
kvm_set_cr8(vcpu, sregs->cr8);
|
kvm_set_cr8(vcpu, sregs->cr8);
|
||||||
|
|
||||||
mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
|
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
|
||||||
kvm_x86_ops->set_efer(vcpu, sregs->efer);
|
kvm_x86_ops->set_efer(vcpu, sregs->efer);
|
||||||
kvm_set_apic_base(vcpu, sregs->apic_base);
|
kvm_set_apic_base(vcpu, sregs->apic_base);
|
||||||
|
|
||||||
|
|
|
@@ -44,7 +44,7 @@ static inline bool is_protmode(struct kvm_vcpu *vcpu)
|
||||||
static inline int is_long_mode(struct kvm_vcpu *vcpu)
|
static inline int is_long_mode(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
return vcpu->arch.shadow_efer & EFER_LMA;
|
return vcpu->arch.efer & EFER_LMA;
|
||||||
#else
|
#else
|
||||||
return 0;
|
return 0;
|
||||||
#endif
|
#endif
|
||||||
|
|
Loading…
Add table
Reference in a new issue