iommu/arm-smmu: add support to configure IOVA range
Allow fastmap clients to specify the IOVA range via the DOMAIN_ATTR_GEOMETRY domain attribute.

Presently fastmap only allocates page tables for the IOVA range specified during the create-mapping call, but clients may want to use IOVA addresses outside this range, for example in their calls to iommu_map. So allow clients to extend the available IOVA space by setting the DOMAIN_ATTR_GEOMETRY attribute's iommu_domain_geometry.aperture_start to the new start address of the IOVA space and iommu_domain_geometry.aperture_end to the new end address. The IOVA space described by aperture_start and aperture_end will be a superset of the IOVA range created through the create-mapping call.

The DOMAIN_ATTR_GEOMETRY attribute can only be set before attaching, and it can only be used to extend the IOVA space; it cannot shrink it. Note that extending the IOVA range does not change the range of IOVA addresses available to the DMA APIs.

Change-Id: Ib389e019a022d98417884002de08115fb0fc9384
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 7f0d77b390
commit 4f83071078

4 changed files with 71 additions and 19 deletions
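For illustration, a minimal client-side sketch of the intended flow. The create/attach helpers (arm_iommu_create_mapping-style mapping with the fastmap attribute, arm_iommu_attach_device) and the example addresses are assumptions about the surrounding msm kernel API, not part of this patch; the DOMAIN_ATTR_GEOMETRY call is the interface the patch adds:

#include <linux/iommu.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Hedged sketch: extend the fastmap IOVA space before attaching.
 * The mapping is assumed to have been created for a narrower range
 * via the usual create-mapping call. */
static int example_extend_iova(struct device *dev,
			       struct dma_iommu_mapping *mapping)
{
	struct iommu_domain_geometry geometry;

	geometry.aperture_start = 0;		  /* new start of IOVA space */
	geometry.aperture_end = SZ_1G * 4ULL - 1; /* new end; must stay < 4GB */

	/* Must be a superset of the create-mapping range, and must be
	 * set before attach: the driver returns -EBUSY afterwards. */
	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_GEOMETRY,
				  &geometry))
		return -EINVAL;

	return arm_iommu_attach_device(dev, mapping);
}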
@@ -3288,6 +3288,43 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 			1 << DOMAIN_ATTR_ENABLE_TTBR1;
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_GEOMETRY: {
+		struct iommu_domain_geometry *geometry =
+				(struct iommu_domain_geometry *)data;
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+				"cannot set geometry attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (geometry->aperture_start >= SZ_1G * 4ULL ||
+		    geometry->aperture_end >= SZ_1G * 4ULL) {
+			pr_err("fastmap does not support IOVAs >= 4GB\n");
+			ret = -EINVAL;
+			break;
+		}
+		if (smmu_domain->attributes
+				& (1 << DOMAIN_ATTR_GEOMETRY)) {
+			if (geometry->aperture_start
+			    < domain->geometry.aperture_start)
+				domain->geometry.aperture_start =
+					geometry->aperture_start;
+
+			if (geometry->aperture_end
+			    > domain->geometry.aperture_end)
+				domain->geometry.aperture_end =
+					geometry->aperture_end;
+		} else {
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
+			domain->geometry.aperture_start =
+				geometry->aperture_start;
+			domain->geometry.aperture_end = geometry->aperture_end;
+		}
+		ret = 0;
+		break;
+	}
 	default:
 		ret = -ENODEV;
 		break;
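The first DOMAIN_ATTR_GEOMETRY call records the requested aperture verbatim; later calls can only widen it, taking the union of the old and new ranges. A small standalone restatement of that widening rule (hypothetical helper, for illustration only):

#include <linux/iommu.h>

/* Illustration of the rule above: e.g. a recorded aperture of
 * [0x10000000, 0x1fffffff] plus a request for [0x08000000, 0x17ffffff]
 * yields their union, [0x08000000, 0x1fffffff]. */
static void example_widen_aperture(struct iommu_domain_geometry *cur,
				   const struct iommu_domain_geometry *req)
{
	if (req->aperture_start < cur->aperture_start)
		cur->aperture_start = req->aperture_start; /* start only moves down */
	if (req->aperture_end > cur->aperture_end)
		cur->aperture_end = req->aperture_end;	   /* end only moves up */
}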
@@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,

 		iommu_tlbiall(mapping->domain);
 		mapping->have_stale_tlbs = false;
-		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+					   mapping->domain->geometry.aperture_start,
+					   mapping->base,
 					   mapping->base + mapping->size - 1,
 					   skip_sync);
 	}
@@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
 	if (unlikely(iova == DMA_ERROR_CODE))
 		goto fail;

-	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
+	pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, iova);

 	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
 		goto fail_free_iova;
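These fastmap call sites all switch the base passed to iopte_pmd_offset() from mapping->base to domain->geometry.aperture_start: the flat PTE array now spans the whole aperture, which may begin below mapping->base, so indexing must be relative to the aperture start. The helper itself is not shown in this diff; a sketch of its assumed shape, reconstructed from the call sites:

/* Assumed shape of iopte_pmd_offset() (an assumption, not from this
 * patch): index the flat PTE array by the page offset of iova from
 * the base address the page tables were built at. */
static inline av8l_fast_iopte *iopte_pmd_offset(av8l_fast_iopte *pmds,
						dma_addr_t base,
						dma_addr_t iova)
{
	return pmds + ((iova - base) >> FAST_PAGE_SHIFT);
}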
@@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	unsigned long flags;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-						mapping->base, iova);
+				mapping->domain->geometry.aperture_start,
+				iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
 	int nptes = len >> FAST_PAGE_SHIFT;
@@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-						mapping->base, iova);
+				mapping->domain->geometry.aperture_start,
+				iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

@@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-						mapping->base, iova);
+				mapping->domain->geometry.aperture_start,
+				iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));

@@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 	while (sg_miter_next(&miter)) {
 		int nptes = miter.length >> FAST_PAGE_SHIFT;

-		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
-					iova_iter);
+		ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova_iter);
 		if (unlikely(av8l_fast_map_public(
 				ptep, page_to_phys(miter.page),
 				miter.length, prot))) {
@@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 out_unmap:
 	/* need to take the lock again for page tables and iova */
 	spin_lock_irqsave(&mapping->lock, flags);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start,
+			dma_addr);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
 out_free_iova:
@@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size,

 	pages = area->pages;
 	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
  *
  * Creates a mapping structure which holds information about used/unused IO
  * address ranges, which is required to perform mapping with IOMMU aware
- * functions. The only VA range supported is [0, 4GB).
+ * functions. The only VA range supported is [0, 4GB].
  *
  * The client device need to be attached to the mapping with
  * fast_smmu_attach_device function.
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev,
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	struct iommu_domain_geometry geometry;

 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev,
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;

-	domain->geometry.aperture_start = mapping->base;
-	domain->geometry.aperture_end = mapping->base + size - 1;
+	geometry.aperture_start = mapping->base;
+	geometry.aperture_end = mapping->base + size - 1;
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+				  &geometry))
+		return -EINVAL;

 	if (iommu_attach_device(domain, dev))
 		return -EINVAL;
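Design note: instead of writing the aperture into the domain directly, fast_smmu_attach_device() now registers its [base, base + size - 1] range through the same DOMAIN_ATTR_GEOMETRY path as external clients, so an aperture a client set before attach is widened to cover the fastmap range rather than overwritten, keeping the final geometry a superset of both.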
@@ -173,12 +173,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
 }

 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
-				u64 end, bool skip_sync)
+				u64 start, u64 end, bool skip_sync)
 {
 	int i;
-	av8l_fast_iopte *pmdp = pmds;
+	av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);

-	for (i = base >> AV8L_FAST_PAGE_SHIFT;
+	for (i = start >> AV8L_FAST_PAGE_SHIFT;
 	     i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
 		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
 			*pmdp = 0;
@@ -668,7 +668,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 8K map calls */
 	for (iova = base; iova < max; iova += SZ_8K) {
@@ -689,7 +689,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 16K map calls */
 	for (iova = base; iova < max; iova += SZ_16K) {
@@ -710,7 +710,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 64K map calls */
 	for (iova = base; iova < max; iova += SZ_64K) {
@@ -37,7 +37,7 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
 #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa

 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base,
-				u64 end, bool skip_sync);
+				u64 start, u64 end, bool skip_sync);
 void av8l_register_notify(struct notifier_block *nb);

 #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
@@ -46,6 +46,7 @@ void av8l_register_notify(struct notifier_block *nb);

 static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
 					      u64 base,
+					      u64 start,
 					      u64 end,
 					      bool skip_sync)
 {