KVM: x86: propagate exception from permission checks on the nested page fault
Currently, if a permission error happens during the translation of the final GPA to HPA, walk_addr_generic returns 0 but does not fill in walker->fault. To avoid this, add an x86_exception* argument to the translate_gpa function, and let it fill in walker->fault. The nested_page_fault field will be true, since the walk_mmu is the nested_mmu and translate_gpa instead operates on the "outer" (NPT) instance.

Reported-by: Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ef54bcfeea
commit 54987b7afa
4 changed files with 16 additions and 11 deletions
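To illustrate the idea outside the kernel tree, here is a minimal user-space sketch of the new translate_gpa contract. The types, constants, the "allowed" parameter, and the helper name sketch_translate_gpa are simplified stand-ins, not the KVM implementation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gpa_t;

    #define UNMAPPED_GVA ((gpa_t)-1)      /* same sentinel the walker checks */
    #define PFERR_WRITE_MASK (1u << 1)    /* write permission bit, as in KVM */

    struct x86_exception {
        uint16_t error_code;
        bool nested_page_fault;
    };

    /*
     * Sketch of the fixed contract: on a permission failure the translation
     * helper fills *exception before returning UNMAPPED_GVA, so the caller
     * no longer loses the fault information.  "allowed" stands in for the
     * real NPT permission check (hypothetical simplification).
     */
    static gpa_t sketch_translate_gpa(gpa_t gpa, uint32_t access,
                                      uint32_t allowed,
                                      struct x86_exception *exception)
    {
        if (access & ~allowed) {
            exception->error_code = (uint16_t)access;
            /* The walk runs on the outer (NPT) MMU, hence nested. */
            exception->nested_page_fault = true;
            return UNMAPPED_GVA;
        }
        return gpa;
    }

    int main(void)
    {
        struct x86_exception fault = { 0 };
        gpa_t real = sketch_translate_gpa(0x1000, PFERR_WRITE_MASK, 0, &fault);

        if (real == UNMAPPED_GVA)
            printf("fault: error_code=%#x nested=%d\n",
                   fault.error_code, fault.nested_page_fault);
        return 0;
    }

In the actual patch this corresponds to walk_addr_generic passing &walker->fault into mmu->translate_gpa, as the paging_tmpl.h hunks below show.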
arch/x86/include/asm/kvm_host.h
@@ -262,7 +262,8 @@ struct kvm_mmu {
 				  struct x86_exception *fault);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    struct x86_exception *exception);
-	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
@@ -923,7 +924,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -943,7 +945,8 @@ void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
 
-static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+				  struct x86_exception *exception)
 {
 	return gpa;
 }
arch/x86/kvm/mmu.c
@@ -3200,7 +3200,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 {
 	if (exception)
 		exception->error_code = 0;
-	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
arch/x86/kvm/paging_tmpl.h
@@ -321,7 +321,8 @@ retry_walk:
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
-					      PFERR_USER_MASK|PFERR_WRITE_MASK);
+					      PFERR_USER_MASK|PFERR_WRITE_MASK,
+					      &walker->fault);
 
 		/*
 		 * FIXME: This can happen if emulation (for of an INS/OUTS
@@ -334,7 +335,7 @@ retry_walk:
 		 * fields.
 		 */
 		if (unlikely(real_gfn == UNMAPPED_GVA))
-			goto error;
+			return 0;
 
 		real_gfn = gpa_to_gfn(real_gfn);
 
@@ -376,7 +377,7 @@ retry_walk:
 	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
 		gfn += pse36_gfn_delta(pte);
 
-	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
+	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
 	if (real_gpa == UNMAPPED_GVA)
 		return 0;
 
arch/x86/kvm/x86.c
@@ -459,11 +459,12 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t ngfn, void *data, int offset, int len,
 			    u32 access)
 {
+	struct x86_exception exception;
 	gfn_t real_gfn;
 	gpa_t ngpa;
 
 	ngpa = gfn_to_gpa(ngfn);
-	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
 	if (real_gfn == UNMAPPED_GVA)
 		return -EFAULT;
 
@@ -4065,16 +4066,16 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+			   struct x86_exception *exception)
 {
 	gpa_t t_gpa;
-	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
+	t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
 
 	return t_gpa;
 }