Merge "iommu/arm-smmu: add support to configure IOVA range"

Linux Build Service Account, 2017-04-20 04:40:58 -07:00, committed by Gerrit - the friendly Code Review server
commit 27a0d415d4
4 changed files with 71 additions and 19 deletions


@@ -3288,6 +3288,43 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
 					1 << DOMAIN_ATTR_ENABLE_TTBR1;
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_GEOMETRY: {
+		struct iommu_domain_geometry *geometry =
+				(struct iommu_domain_geometry *)data;
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+				"cannot set geometry attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (geometry->aperture_start >= SZ_1G * 4ULL ||
+		    geometry->aperture_end >= SZ_1G * 4ULL) {
+			pr_err("fastmap does not support IOVAs >= 4GB\n");
+			ret = -EINVAL;
+			break;
+		}
+		if (smmu_domain->attributes
+				& (1 << DOMAIN_ATTR_GEOMETRY)) {
+			if (geometry->aperture_start
+					< domain->geometry.aperture_start)
+				domain->geometry.aperture_start =
+					geometry->aperture_start;
+
+			if (geometry->aperture_end
+					> domain->geometry.aperture_end)
+				domain->geometry.aperture_end =
+					geometry->aperture_end;
+		} else {
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
+			domain->geometry.aperture_start =
+				geometry->aperture_start;
+			domain->geometry.aperture_end = geometry->aperture_end;
+		}
+		ret = 0;
+		break;
+	}
 	default:
 		ret = -ENODEV;
 		break;


@@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
 		iommu_tlbiall(mapping->domain);
 		mapping->have_stale_tlbs = false;
-		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+					mapping->domain->geometry.aperture_start,
+					mapping->base,
 					mapping->base + mapping->size - 1,
 					skip_sync);
 	}
@@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
 	if (unlikely(iova == DMA_ERROR_CODE))
 		goto fail;

-	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
+	pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, iova);

 	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
 		goto fail_free_iova;
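Throughout this file, the base argument of iopte_pmd_offset() changes from mapping->base to the domain's aperture_start. The helper's definition is not part of this diff; the change only makes sense if it indexes the flat PTE array relative to the start of the IOVA window, roughly as in the sketch below (the _sketch name and the exact arithmetic are assumptions, for illustration only):

	/* Assumed shape of the helper, not taken from the source tree. */
	static inline av8l_fast_iopte *iopte_pmd_offset_sketch(av8l_fast_iopte *pmds,
							       dma_addr_t base,
							       dma_addr_t iova)
	{
		/* one PTE per FAST_PAGE_SIZE page, counted from the window start */
		return pmds + ((iova - base) >> FAST_PAGE_SHIFT);
	}

With the aperture now configurable, lookups have to index from the aperture start, which may no longer equal mapping->base.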
@@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	unsigned long flags;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
 	int nptes = len >> FAST_PAGE_SHIFT;
@@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
 {
 	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
 	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
-					mapping->base, iova);
+					mapping->domain->geometry.aperture_start,
+					iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 	while (sg_miter_next(&miter)) {
 		int nptes = miter.length >> FAST_PAGE_SHIFT;

-		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
-					iova_iter);
+		ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+				mapping->domain->geometry.aperture_start,
+				iova_iter);
 		if (unlikely(av8l_fast_map_public(
 				     ptep, page_to_phys(miter.page),
 				     miter.length, prot))) {
@@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
 out_unmap:
 	/* need to take the lock again for page tables and iova */
 	spin_lock_irqsave(&mapping->lock, flags);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start,
+			dma_addr);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
 out_free_iova:
@@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size,
 	pages = area->pages;
 	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+			mapping->domain->geometry.aperture_start, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(ptep, size);
 	fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
  *
  * Creates a mapping structure which holds information about used/unused IO
  * address ranges, which is required to perform mapping with IOMMU aware
- * functions. The only VA range supported is [0, 4GB).
+ * functions. The only VA range supported is [0, 4GB].
  *
  * The client device need to be attached to the mapping with
  * fast_smmu_attach_device function.
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev,
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	struct iommu_domain_geometry geometry;

 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev,
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;

-	domain->geometry.aperture_start = mapping->base;
-	domain->geometry.aperture_end = mapping->base + size - 1;
+	geometry.aperture_start = mapping->base;
+	geometry.aperture_end = mapping->base + size - 1;
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+				  &geometry))
+		return -EINVAL;

 	if (iommu_attach_device(domain, dev))
 		return -EINVAL;


@@ -173,12 +173,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
 }

 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
-				u64 end, bool skip_sync)
+				u64 start, u64 end, bool skip_sync)
 {
 	int i;
-	av8l_fast_iopte *pmdp = pmds;
+	av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);

-	for (i = base >> AV8L_FAST_PAGE_SHIFT;
+	for (i = start >> AV8L_FAST_PAGE_SHIFT;
 	     i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
 		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
 			*pmdp = 0;
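With the extra parameter, callers now pass the IOVA corresponding to the first PTE (base) separately from the window that should actually be swept (start/end). Annotated form of the call from __fast_smmu_alloc_iova() in the earlier hunk; the parameter-role comments are descriptive additions inferred from the callers, not from the source:

	av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
			mapping->domain->geometry.aperture_start, /* base: IOVA of the first PTE */
			mapping->base,                            /* start: first IOVA to sweep */
			mapping->base + mapping->size - 1,        /* end: last IOVA to sweep */
			skip_sync);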
@@ -668,7 +668,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 8K map calls */
 	for (iova = base; iova < max; iova += SZ_8K) {
@@ -689,7 +689,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 16K map calls */
 	for (iova = base; iova < max; iova += SZ_16K) {
@@ -710,7 +710,7 @@ static int __init av8l_fast_positive_testing(void)
 	}

 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, base, max, false);
+	av8l_fast_clear_stale_ptes(pmds, base, base, max, false);

 	/* map the entire 4GB VA space with 64K map calls */
 	for (iova = base; iova < max; iova += SZ_64K) {


@@ -37,7 +37,7 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
 #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI	0xa

 void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base,
-				u64 end, bool skip_sync);
+				u64 start, u64 end, bool skip_sync);
 void av8l_register_notify(struct notifier_block *nb);

 #else	/* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
@@ -46,6 +46,7 @@ void av8l_register_notify(struct notifier_block *nb);
 static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
 					      u64 base,
+					      u64 start,
 					      u64 end,
 					      bool skip_sync)
 {