Merge "iommu/arm-smmu: add support to configure IOVA range"
commit 27a0d415d4
4 changed files with 71 additions and 19 deletions
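With this change, a fastmap client programs the IOVA aperture through the DOMAIN_ATTR_GEOMETRY domain attribute while the domain is still detached, and only then attaches the device; the arm-smmu driver validates the range (it must stay below 4 GB and the domain must not yet be attached) and records it in domain->geometry. The sketch below is not part of the diff: it is a minimal illustration of that flow, assuming the caller already holds the iommu_domain; the function name is invented for the example.

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Illustrative only: publish a [base, base + size - 1] IOVA window on a
 * detached domain, then attach the device. Mirrors the sequence that
 * fast_smmu_attach_device() follows after this commit.
 */
static int example_configure_iova_range(struct iommu_domain *domain,
					struct device *dev,
					dma_addr_t base, u64 size)
{
	struct iommu_domain_geometry geometry;

	geometry.aperture_start = base;
	geometry.aperture_end = base + size - 1;

	/* Rejected with -EINVAL above 4 GB and with -EBUSY once attached. */
	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry))
		return -EINVAL;

	return iommu_attach_device(domain, dev);
}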
@@ -3288,6 +3288,43 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 				1 << DOMAIN_ATTR_ENABLE_TTBR1;
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_GEOMETRY: {
+		struct iommu_domain_geometry *geometry =
+				(struct iommu_domain_geometry *)data;
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+				"cannot set geometry attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (geometry->aperture_start >= SZ_1G * 4ULL ||
+		    geometry->aperture_end >= SZ_1G * 4ULL) {
+			pr_err("fastmap does not support IOVAs >= 4GB\n");
+			ret = -EINVAL;
+			break;
+		}
+		if (smmu_domain->attributes
+				& (1 << DOMAIN_ATTR_GEOMETRY)) {
+			if (geometry->aperture_start
+					< domain->geometry.aperture_start)
+				domain->geometry.aperture_start =
+					geometry->aperture_start;
+
+			if (geometry->aperture_end
+					> domain->geometry.aperture_end)
+				domain->geometry.aperture_end =
+					geometry->aperture_end;
+		} else {
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
+			domain->geometry.aperture_start =
+				geometry->aperture_start;
+			domain->geometry.aperture_end = geometry->aperture_end;
+		}
+		ret = 0;
+		break;
+	}
 	default:
 		ret = -ENODEV;
 		break;
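When DOMAIN_ATTR_GEOMETRY has already been set on the domain, a second call does not replace the aperture: the case above keeps the lowest aperture_start and the highest aperture_end, i.e. the union of every window requested so far. A small illustration follows; it is not part of the diff, and the values and helper name are invented for the example.

/* Hypothetical helper: two requests on the same detached domain end up
 * as one aperture covering both, [SZ_16M, 2 GB - 1] here.
 */
static void example_merge_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry first = {
		.aperture_start = SZ_16M,
		.aperture_end   = SZ_1G - 1,
	};
	struct iommu_domain_geometry second = {
		.aperture_start = SZ_256M,
		.aperture_end   = (SZ_1G * 2ULL) - 1,
	};

	iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &first);
	iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &second);
	/* domain->geometry now spans [SZ_16M, (SZ_1G * 2ULL) - 1]. */
}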
@@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 
 		iommu_tlbiall(mapping->domain);
 		mapping->have_stale_tlbs = false;
-		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				mapping->base,
 				mapping->base + mapping->size - 1,
 				skip_sync);
 	}
@@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
 	if (unlikely(iova == DMA_ERROR_CODE))
 		goto fail;
 
-	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
+	pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, iova);
 
 	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
 		goto fail_free_iova;
@@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	unsigned long flags;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
 	int nptes = len >> FAST_PAGE_SHIFT;
@@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
@@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
@@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 	while (sg_miter_next(&miter)) {
 		int nptes = miter.length >> FAST_PAGE_SHIFT;
 
-		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
-					iova_iter);
+		ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova_iter);
 		if (unlikely(av8l_fast_map_public(
 				ptep, page_to_phys(miter.page),
 				miter.length, prot))) {
@@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 out_unmap:
 	/* need to take the lock again for page tables and iova */
 	spin_lock_irqsave(&mapping->lock, flags);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start,
+			dma_addr);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
 out_free_iova:
@@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size,
 
 	pages = area->pages;
 	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
  *
  * Creates a mapping structure which holds information about used/unused IO
  * address ranges, which is required to perform mapping with IOMMU aware
- * functions. The only VA range supported is [0, 4GB).
+ * functions. The only VA range supported is [0, 4GB].
  *
  * The client device need to be attached to the mapping with
  * fast_smmu_attach_device function.
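The kernel-doc above states the client contract: the whole VA window has to sit below 4 GB, and the mapping only takes effect once fast_smmu_attach_device() has been called. A rough client-side sketch follows; it is not part of the diff, and it assumes the create helper documented here has the form fast_smmu_create_mapping(dev, base, size) returning a struct dma_iommu_mapping * (that signature is an assumption).

/* Assumed API, illustration only:
 *   struct dma_iommu_mapping *fast_smmu_create_mapping(struct device *dev,
 *						dma_addr_t base, u64 size);
 */
static int example_fastmap_setup(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* A [4K, 4K + 1G) window, comfortably below the 4 GB limit. */
	mapping = fast_smmu_create_mapping(dev, SZ_4K, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Publishes the window as DOMAIN_ATTR_GEOMETRY and attaches dev. */
	return fast_smmu_attach_device(dev, mapping);
}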
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev,
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	struct iommu_domain_geometry geometry;
 
 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev,
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;
 
-	domain->geometry.aperture_start = mapping->base;
-	domain->geometry.aperture_end = mapping->base + size - 1;
+	geometry.aperture_start = mapping->base;
+	geometry.aperture_end = mapping->base + size - 1;
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+				  &geometry))
+		return -EINVAL;
 
 	if (iommu_attach_device(domain, dev))
 		return -EINVAL;
@@ -173,12 +173,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
 }
 
 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
-				u64 end, bool skip_sync)
+				u64 start, u64 end, bool skip_sync)
 {
 	int i;
-	av8l_fast_iopte *pmdp = pmds;
+	av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);
 
-	for (i = base >> AV8L_FAST_PAGE_SHIFT;
+	for (i = start >> AV8L_FAST_PAGE_SHIFT;
 	     i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
 		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
 			*pmdp = 0;
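The extra start parameter separates two things the old signature conflated: base now only says where slot 0 of the page table sits (the domain's aperture start), while [start, end] is the IOVA range actually being scrubbed. The sketch below is not from the diff; it spells out the indexing this relies on, assuming iopte_pmd_offset(pmds, base, iova) resolves a slot relative to base (an assumption, since that helper is not shown here). It uses the av8l types from the fastmap page-table header.

/* Illustration of the assumed slot arithmetic. */
static inline av8l_fast_iopte *example_pte_slot(av8l_fast_iopte *pmds,
						u64 base, u64 iova)
{
	/* One 4K page per PTE, counted from the aperture start. */
	return pmds + ((iova - base) >> AV8L_FAST_PAGE_SHIFT);
}

/* The loop above then walks ((end - start) >> AV8L_FAST_PAGE_SHIFT) + 1
 * consecutive slots, starting at example_pte_slot(pmds, base, start).
 */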
@@ -668,7 +668,7 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
 
 	/* map the entire 4GB VA space with 8K map calls */
 	for (iova = base; iova < max; iova += SZ_8K) {
@@ -689,7 +689,7 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
 	for (iova = base; iova < max; iova += SZ_16K) {
@@ -710,7 +710,7 @@ static int __init av8l_fast_positive_testing(void)
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
 	for (iova = base; iova < max; iova += SZ_64K) {
@@ -37,7 +37,7 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
 #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
 
 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base,
-				u64 end, bool skip_sync);
+				u64 start, u64 end, bool skip_sync);
 void av8l_register_notify(struct notifier_block *nb);
 
 #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
@@ -46,6 +46,7 @@ void av8l_register_notify(struct notifier_block *nb);
 
 static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
					       u64 base,
+					       u64 start,
					       u64 end,
					       bool skip_sync)
 {