iommu: io-pgtable-arm: unmap last level all at once

Currently we walk each last-level leaf pte during unmap and zero it
out individually.  Since these last-level ptes are contiguous (up to
512 entries per table), optimize the unmap path by zeroing them all
out at once rather than operating on them one at a time.

Change-Id: I21d490e8a94355df4d4caecab33774b5f8ecf3ca
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
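
For illustration only, here is a minimal userspace sketch (not kernel code)
of the before/after behavior under a 4K granule (pg_shift = 12), where a
last-level table holds 512 eight-byte ptes; the names below (iopte,
PTES_PER_TABLE, the two unmap helpers) are made up for the example and are
not the driver's:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint64_t iopte;

#define PG_SHIFT        12                      /* 4K granule */
#define PTES_PER_TABLE  (1u << (PG_SHIFT - 3))  /* 512 eight-byte entries */

/* Before: clear each leaf pte with its own store. */
static void unmap_one_at_a_time(iopte *table, unsigned int entries)
{
        for (unsigned int i = 0; i < entries; i++)
                table[i] = 0;
}

/* After: clear the whole contiguous run with a single memset. */
static void unmap_all_at_once(iopte *table, unsigned int entries)
{
        memset(table, 0, entries * sizeof(*table));
}

int main(void)
{
        static iopte table[PTES_PER_TABLE];

        unmap_one_at_a_time(table, PTES_PER_TABLE);
        unmap_all_at_once(table, PTES_PER_TABLE);
        printf("%u entries, %zu bytes cleared per table\n",
               PTES_PER_TABLE, sizeof(table));
        return 0;
}

So tearing down a fully-mapped 2MB range becomes one 4KB memset over the
table instead of 512 individual stores.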

drivers/iommu/io-pgtable-arm.c

@@ -463,6 +463,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
 	void *cookie = data->iop.cookie;
 	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+	size_t pgsize = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size);
 
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 	pte = *ptep;
@@ -472,7 +473,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		return 0;
 
 	/* If the size matches this level, we're in the right place */
-	if (size == blk_size) {
+	if (size == pgsize && size == blk_size) {
 		*ptep = 0;
 		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
 
@@ -482,6 +483,37 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		}
 
 		return size;
+	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
+		size_t remaining = size;
+
+		/*
+		 * This isn't a block mapping so it must be a table mapping
+		 * and since it's the 2nd-to-last level the next level has
+		 * to be all page mappings. Zero them all out in one fell
+		 * swoop.
+		 */
+		for (;;) {
+			arm_lpae_iopte *table = iopte_deref(pte, data);
+			int table_len;
+			size_t unmapped;
+			int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
+			int entry_size = (1 << data->pg_shift);
+			int max_entries =
+				ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+			int entries = min_t(int, remaining / entry_size,
+					    max_entries - tl_offset);
+			table += tl_offset;
+			table_len = entries * sizeof(*table);
+			memset(table, 0, table_len);
+			tlb->flush_pgtable(table, table_len, cookie);
+			unmapped = entries * entry_size;
+			iova += unmapped;
+			remaining -= unmapped;
+			if (!remaining)
+				break;
+			pte = *++ptep;
+		}
+		return size;
 	} else if (iopte_leaf(pte, lvl)) {
 		/*
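
To make the loop's index arithmetic concrete, here is a hedged standalone
sketch of the per-iteration math with a 4K granule, assuming lvl is the
2nd-to-last level so each next-level table spans 2MB; the macros below are
local stand-ins for ARM_LPAE_BLOCK_SIZE and ARM_LPAE_LVL_IDX, not the
driver's definitions:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PG_SHIFT        12
#define ENTRY_SIZE      (1UL << PG_SHIFT)               /* 4KB per leaf entry */
#define BLOCK_SIZE      (ENTRY_SIZE << (PG_SHIFT - 3))  /* 2MB per table */

int main(void)
{
        unsigned long iova = 0x10000;   /* starts 16 entries into a table */
        size_t remaining = 4UL << 20;   /* unmap 4MB */
        int pass = 0;

        while (remaining) {
                /* Index of iova within its last-level table (0..511). */
                int tl_offset = (iova >> PG_SHIFT) &
                                ((BLOCK_SIZE / ENTRY_SIZE) - 1);
                int max_entries = BLOCK_SIZE / ENTRY_SIZE;
                size_t by_size = remaining / ENTRY_SIZE;
                /* Clamp to what is left in this table, like min_t() above. */
                int entries = by_size < (size_t)(max_entries - tl_offset) ?
                                (int)by_size : max_entries - tl_offset;
                size_t unmapped = (size_t)entries * ENTRY_SIZE;

                printf("pass %d: offset %d, clear %d entries (%zu bytes of ptes)\n",
                       ++pass, tl_offset, entries, entries * sizeof(uint64_t));
                iova += unmapped;
                remaining -= unmapped;
        }
        return 0;
}

With these numbers the 4MB unmap takes three passes: 496 entries to the end
of the first table, a full 512 in the next, then the final 16.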

drivers/iommu/iommu.c

@@ -1107,10 +1107,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize = iommu_pgsize(domain->ops->pgsize_bitmap, iova,
-					     size - unmapped);
+		size_t left = size - unmapped;
 
-		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+		unmapped_page = domain->ops->unmap(domain, iova, left);
 		if (!unmapped_page)
 			break;
 
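
The effect of this iommu.c change, sketched as a simplified version of the
core loop (not the kernel's actual API): instead of pre-splitting the range
with iommu_pgsize(), the core now passes everything that is left and lets
the driver decide how much it can unmap per call. The unmap_fn type and
generic_unmap name below are hypothetical:

#include <stddef.h>

/* Hypothetical driver hook: unmaps up to 'size' bytes, returns how many. */
typedef size_t (*unmap_fn)(unsigned long iova, size_t size);

size_t generic_unmap(unsigned long iova, size_t size, unmap_fn unmap)
{
        size_t unmapped = 0;

        while (unmapped < size) {
                size_t left = size - unmapped;  /* whole remainder, not one pgsize */
                size_t done = unmap(iova, left);

                if (!done)                      /* hit an unmapped hole: stop */
                        break;
                iova += done;
                unmapped += done;
        }
        return unmapped;
}

This is what lets __arm_lpae_unmap above see the full remaining size and
batch the last-level clears, with iommu_pgsize() now consulted inside the
driver itself (see the first hunk).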