iommu/vt-d: Update IOMMU state when memory hotplug happens
If a static identity domain has been created, the IOMMU driver needs to update the si_domain page table when a memory hotplug event happens. Otherwise PCI device DMA operations can't access the hot-added memory regions.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
commit 75f05569d0
parent 2e45528930

3 changed files with 130 additions and 7 deletions
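For context before the diff: the patch hooks into the memory hotplug notifier chain from <linux/memory.h>. A minimal sketch of that pattern follows; the callback name and body here are illustrative only, while the real work is done by intel_iommu_memory_notifier() in the diff below.

#include <linux/memory.h>	/* register_memory_notifier(), struct memory_notify */
#include <linux/notifier.h>	/* struct notifier_block, NOTIFY_OK, NOTIFY_BAD */

/* Illustrative callback: just logs the hot-plugged range for each event. */
static int example_memory_notifier(struct notifier_block *nb,
				   unsigned long val, void *v)
{
	struct memory_notify *mhp = v;	/* pfn range being onlined/offlined */

	switch (val) {
	case MEM_GOING_ONLINE:	/* prepare mappings; NOTIFY_BAD vetoes onlining */
	case MEM_OFFLINE:	/* memory is gone; tear mappings down */
	case MEM_CANCEL_ONLINE:	/* onlining aborted; undo MEM_GOING_ONLINE work */
		pr_info("memory event %lu: start_pfn=%lx nr_pages=%lx\n",
			val, mhp->start_pfn, mhp->nr_pages);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_memory_nb = {
	.notifier_call = example_memory_notifier,
};

/* Registration, typically from an init path:
 *	register_memory_notifier(&example_memory_nb);
 */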
drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -3683,6 +3684,73 @@ static struct notifier_block device_nb = {
 	.notifier_call = device_notifier,
 };
 
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+				       unsigned long val, void *v)
+{
+	struct memory_notify *mhp = v;
+	unsigned long long start, end;
+	unsigned long start_vpfn, last_vpfn;
+
+	switch (val) {
+	case MEM_GOING_ONLINE:
+		start = mhp->start_pfn << PAGE_SHIFT;
+		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+		if (iommu_domain_identity_map(si_domain, start, end)) {
+			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+				start, end);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+		while (start_vpfn <= last_vpfn) {
+			struct iova *iova;
+			struct dmar_drhd_unit *drhd;
+			struct intel_iommu *iommu;
+
+			iova = find_iova(&si_domain->iovad, start_vpfn);
+			if (iova == NULL) {
+				pr_debug("dmar: failed get IOVA for PFN %lx\n",
+					 start_vpfn);
+				break;
+			}
+
+			iova = split_and_remove_iova(&si_domain->iovad, iova,
+						     start_vpfn, last_vpfn);
+			if (iova == NULL) {
+				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+					start_vpfn, last_vpfn);
+				return NOTIFY_BAD;
+			}
+
+			rcu_read_lock();
+			for_each_active_iommu(iommu, drhd)
+				iommu_flush_iotlb_psi(iommu, si_domain->id,
+					iova->pfn_lo,
+					iova->pfn_hi - iova->pfn_lo + 1, 0);
+			rcu_read_unlock();
+			dma_pte_clear_range(si_domain, iova->pfn_lo,
+					    iova->pfn_hi);
+			dma_pte_free_pagetable(si_domain, iova->pfn_lo,
+					       iova->pfn_hi);
+
+			start_vpfn = iova->pfn_hi + 1;
+			free_iova_mem(iova);
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+	.notifier_call = intel_iommu_memory_notifier,
+	.priority = 0
+};
+
 int __init intel_iommu_init(void)
 {
 	int ret = -ENODEV;
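A note on units in the notifier above: MEM_GOING_ONLINE builds the identity map from physical addresses (start_pfn << PAGE_SHIFT), while the teardown path works in VT-d page-frame numbers, hence the mm_to_dma_pfn() conversion. The while loop is needed because the hot-removed range may be covered by several iova nodes; each iteration trims one node to the window and advances past it. In the driver of this era, mm_to_dma_pfn() is a left shift by (PAGE_SHIFT - VTD_PAGE_SHIFT). A standalone sketch of the arithmetic, with made-up sample values:

#include <stdio.h>

#define VTD_PAGE_SHIFT	12	/* VT-d page tables always use 4 KiB frames */
#define PAGE_SHIFT	12	/* assumption: host also uses 4 KiB pages */

/* Same conversion intel-iommu.c performs with mm_to_dma_pfn(). */
#define mm_to_dma_pfn(mm_pfn)	((mm_pfn) << (PAGE_SHIFT - VTD_PAGE_SHIFT))

int main(void)
{
	unsigned long start_pfn = 0x40000;	/* example: block at 1 GiB */
	unsigned long nr_pages  = 0x8000;	/* example: 128 MiB of 4 KiB pages */

	/* With 4 KiB host pages both pfn spaces coincide; with larger
	 * host pages each mm pfn would expand to several VT-d pfns. */
	printf("dma pfn range: [%lx-%lx]\n",
	       mm_to_dma_pfn(start_pfn),
	       mm_to_dma_pfn(start_pfn + nr_pages - 1));
	return 0;
}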
@@ -3755,8 +3823,9 @@ int __init intel_iommu_init(void)
 	init_iommu_pm_ops();
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
 	bus_register_notifier(&pci_bus_type, &device_nb);
+	if (si_domain && !hw_pass_through)
+		register_memory_notifier(&intel_iommu_memory_nb);
 
 	intel_iommu_enabled = 1;
 
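The registration above is deliberately gated: with hardware pass-through (hw_pass_through) DMA bypasses the page tables entirely, and without a static identity domain (si_domain) there are no identity mappings to maintain, so in either case the notifier would have nothing to do.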
drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
 	return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (iova) {
+		iova->pfn_lo = pfn_lo;
+		iova->pfn_hi = pfn_hi;
+	}
+
+	return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
 	struct iova *iova;
 
-	iova = alloc_iova_mem();
-	if (!iova)
-		return iova;
-
-	iova->pfn_hi = pfn_hi;
-	iova->pfn_lo = pfn_lo;
-	iova_insert_rbtree(&iovad->rbroot, iova);
+	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+	if (iova)
+		iova_insert_rbtree(&iovad->rbroot, iova);
 
 	return iova;
 }
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+		      unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	unsigned long flags;
+	struct iova *prev = NULL, *next = NULL;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (iova->pfn_lo < pfn_lo) {
+		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+		if (prev == NULL)
+			goto error;
+	}
+	if (iova->pfn_hi > pfn_hi) {
+		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+		if (next == NULL)
+			goto error;
+	}
+
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+
+	if (prev) {
+		iova_insert_rbtree(&iovad->rbroot, prev);
+		iova->pfn_lo = pfn_lo;
+	}
+	if (next) {
+		iova_insert_rbtree(&iovad->rbroot, next);
+		iova->pfn_hi = pfn_hi;
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return iova;
+
+error:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	if (prev)
+		free_iova_mem(prev);
+	return NULL;
+}
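What the new helper guarantees its caller: given a node known to overlap [pfn_lo, pfn_hi], it removes the node from the rbtree, re-inserts any portion lying outside the window as fresh head/tail nodes, and hands back the trimmed overlap for the caller to flush and free. A standalone model of that interval logic, with the kernel types and rbtree replaced by printouts and made-up names:

#include <stdio.h>

struct range { unsigned long pfn_lo, pfn_hi; };

/* Mirrors split_and_remove_iova(): trim *node to [pfn_lo, pfn_hi],
 * reporting the remainders the kernel code re-inserts into the rbtree. */
static void split_model(struct range *node,
			unsigned long pfn_lo, unsigned long pfn_hi)
{
	if (node->pfn_lo < pfn_lo) {	/* head remainder survives the split */
		printf("  re-insert head [%lx-%lx]\n", node->pfn_lo, pfn_lo - 1);
		node->pfn_lo = pfn_lo;
	}
	if (node->pfn_hi > pfn_hi) {	/* tail remainder survives the split */
		printf("  re-insert tail [%lx-%lx]\n", pfn_hi + 1, node->pfn_hi);
		node->pfn_hi = pfn_hi;
	}
	printf("  return overlap [%lx-%lx] for teardown\n",
	       node->pfn_lo, node->pfn_hi);
}

int main(void)
{
	struct range node = { 0x100, 0x5ff };

	puts("offline [0x200-0x3ff] from reserved node [0x100-0x5ff]:");
	split_model(&node, 0x200, 0x3ff);	/* splits node into three pieces */
	return 0;
}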
include/linux/iova.h
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 
 #endif