kvm/x86: Convert iommu_flags to iommu_noncoherent
Default to operating in coherent mode.  This simplifies the logic when
we switch to a model of registering and unregistering noncoherent I/O
with KVM.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ec53500fae
commit d96eb2c6f4

6 changed files with 12 additions and 15 deletions
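
The core of the change is a polarity flip: the iommu_flags word, whose
only defined bit was KVM_IOMMU_CACHE_COHERENCY, becomes a single bool,
iommu_noncoherent. Because struct kvm is zero-allocated, the new field
reads as coherent by default, whereas the old flags word read as
noncoherent until the bit was set. A minimal userspace sketch of that
flip (the structs below are simplified stand-ins, not the kernel
definitions):

/* Model of the old and new state, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define KVM_IOMMU_CACHE_COHERENCY 0x1	/* old opt-in coherency bit */

struct kvm_arch_old { int iommu_flags; };
struct kvm_arch_new { bool iommu_noncoherent; };

int main(void)
{
	/* Zero-initialized, as after allocation of struct kvm. */
	struct kvm_arch_old old = {0};
	struct kvm_arch_new new = {0};

	/* Old scheme: a zeroed flags word tests as NONcoherent until
	 * the coherency bit is set. */
	printf("old default noncoherent: %d\n",
	       !(old.iommu_flags & KVM_IOMMU_CACHE_COHERENCY));

	/* New scheme: a zeroed bool tests as coherent, the common case. */
	printf("new default noncoherent: %d\n", new.iommu_noncoherent);
	return 0;
}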
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -476,7 +476,7 @@ struct kvm_arch {
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -564,7 +564,7 @@ struct kvm_arch {
 
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
-	int iommu_flags;
+	bool iommu_noncoherent;
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7446,7 +7446,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	if (is_mmio)
 		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
 	else if (vcpu->kvm->arch.iommu_domain &&
-		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
+		 vcpu->kvm->arch.iommu_noncoherent)
 		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
 		      VMX_EPT_MT_EPTE_SHIFT;
 	else
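
For context on the vmx_get_mt_mask() hunk above: when a noncoherent
IOMMU domain is attached, device DMA bypasses the CPU caches, so KVM
must honor the guest's own memory type in the EPT entry instead of
forcing write-back. Below is a simplified, runnable model of that
decision; the constants mirror the kernel values, the guest memory
type is passed in as a parameter rather than looked up, and the real
coherent branch additionally sets the "ignore PAT" bit, which this
sketch omits.

/* Simplified model of the EPT memory-type choice, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_MT_EPTE_SHIFT	3
#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRBACK	6

static uint64_t get_mt_mask(bool is_mmio, bool iommu_noncoherent,
			    uint64_t guest_type)
{
	if (is_mmio)			/* MMIO is always uncached */
		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
	if (iommu_noncoherent)		/* honor the guest's memory type */
		return guest_type << VMX_EPT_MT_EPTE_SHIFT;
	return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
}

int main(void)
{
	printf("mmio:        %#llx\n",
	       (unsigned long long)get_mt_mask(true, false, 0));
	printf("noncoherent: %#llx\n",
	       (unsigned long long)get_mt_mask(false, true, MTRR_TYPE_UNCACHABLE));
	printf("coherent:    %#llx\n",
	       (unsigned long long)get_mt_mask(false, false, 0));
	return 0;
}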
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2719,7 +2719,7 @@ static void wbinvd_ipi(void *garbage)
 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.iommu_domain &&
-		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
+		vcpu->kvm->arch.iommu_noncoherent;
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -746,9 +746,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY	0x1
-
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -79,7 +79,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		flags = IOMMU_READ;
 		if (!(slot->flags & KVM_MEM_READONLY))
 			flags |= IOMMU_WRITE;
-		if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
+		if (!kvm->arch.iommu_noncoherent)
 			flags |= IOMMU_CACHE;
 
@@ -158,7 +158,8 @@ int kvm_assign_device(struct kvm *kvm,
 {
 	struct pci_dev *pdev = NULL;
 	struct iommu_domain *domain = kvm->arch.iommu_domain;
-	int r, last_flags;
+	int r;
+	bool noncoherent;
 
 	/* check if iommu exists and in use */
 	if (!domain)
@@ -174,15 +175,13 @@ int kvm_assign_device(struct kvm *kvm,
 		return r;
 	}
 
-	last_flags = kvm->arch.iommu_flags;
-	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
-				 IOMMU_CAP_CACHE_COHERENCY))
-		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;
+	noncoherent = !iommu_domain_has_cap(kvm->arch.iommu_domain,
+					    IOMMU_CAP_CACHE_COHERENCY);
 
 	/* Check if need to update IOMMU page table for guest memory */
-	if ((last_flags ^ kvm->arch.iommu_flags) ==
-	    KVM_IOMMU_CACHE_COHERENCY) {
+	if (noncoherent != kvm->arch.iommu_noncoherent) {
 		kvm_iommu_unmap_memslots(kvm);
+		kvm->arch.iommu_noncoherent = noncoherent;
 		r = kvm_iommu_map_memslots(kvm);
 		if (r)
 			goto out_unmap;
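
The kvm_assign_device() hunk above carries the main behavioral change:
the domain's coherency is recomputed on every device assignment, and
guest memory is unmapped and remapped only when the noncoherent state
actually flips, because the IOMMU_CACHE mapping flag depends on it. A
small userspace model of that decision, with stand-ins for the kernel
helpers:

/* Model of the remap decision, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool arch_iommu_noncoherent;	/* kvm->arch.iommu_noncoherent */

/* Stand-in for iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY);
 * pretend the domain lacks snoop control. */
static bool domain_cache_coherent(void)
{
	return false;
}

static void assign_device(void)
{
	bool noncoherent = !domain_cache_coherent();

	/* Existing mappings used IOMMU_CACHE based on the old state;
	 * remap only when the state actually changes. */
	if (noncoherent != arch_iommu_noncoherent) {
		printf("unmap memslots, remap %s IOMMU_CACHE\n",
		       noncoherent ? "without" : "with");
		arch_iommu_noncoherent = noncoherent;
	} else {
		printf("coherency unchanged, no remap\n");
	}
}

int main(void)
{
	assign_device();	/* first noncoherent device: remap */
	assign_device();	/* same state: nothing to do */
	return 0;
}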
@@ -342,6 +341,7 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
 	kvm->arch.iommu_domain = NULL;
+	kvm->arch.iommu_noncoherent = false;
 	mutex_unlock(&kvm->slots_lock);
 
 	iommu_domain_free(domain);