iommu: io-pgtable-arm: Flush all tlbs at end of unmap
Rather than calling the TLB maintenance routines throughout the course of
the unmap operation, just flush the entire TLB for the context in question
all at once, at the very end of the unmap. This greatly improves
performance for large page tables, which are common for large buffers in a
heavily fragmented system. In my testing, this optimization gave a ~10%
speedup when unmapping 64K.

Change-Id: Iaa2b211e730dad6bd9235ef98dd2a89cf541e663
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
parent 700c9f4fcb
commit a75bc6aef5

1 changed file with 1 addition and 7 deletions
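Before the diff: a minimal, standalone C sketch (user-space, not kernel code) of the trade-off described in the commit message, contrasting per-entry TLB maintenance during the unmap walk with a single flush of the whole context at the end. The names here (mock_tlb_ops, unmap_range_per_entry, unmap_range_flush_all) are hypothetical and only mimic the shape of the io_pgtable TLB callbacks; the real change is in the hunks below.

/*
 * Minimal user-space sketch (not kernel code) of the idea behind this
 * patch: instead of issuing a TLB invalidation per unmapped entry plus a
 * sync, issue one flush of the whole context once the unmap is finished.
 * mock_tlb_ops and the unmap_range_* helpers are hypothetical and only
 * mimic the shape of the io_pgtable TLB callbacks.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_tlb_ops {
	void (*tlb_add_flush)(unsigned long iova, size_t size, void *cookie);
	void (*tlb_sync)(void *cookie);
	void (*tlb_flush_all)(void *cookie);
};

/* Count callback invocations to compare the two strategies. */
static unsigned long tlb_op_count;

static void count_add_flush(unsigned long iova, size_t size, void *cookie)
{
	(void)iova; (void)size; (void)cookie;
	tlb_op_count++;
}

static void count_sync(void *cookie)      { (void)cookie; tlb_op_count++; }
static void count_flush_all(void *cookie) { (void)cookie; tlb_op_count++; }

static const struct mock_tlb_ops ops = {
	.tlb_add_flush	= count_add_flush,
	.tlb_sync	= count_sync,
	.tlb_flush_all	= count_flush_all,
};

/* Old style: one invalidation per unmapped page, plus a final sync. */
static void unmap_range_per_entry(unsigned long iova, size_t size, size_t pgsz)
{
	size_t off;

	for (off = 0; off < size; off += pgsz)
		ops.tlb_add_flush(iova + off, pgsz, NULL);
	ops.tlb_sync(NULL);
}

/* New style (what this patch moves to): one flush of the whole context. */
static void unmap_range_flush_all(unsigned long iova, size_t size, size_t pgsz)
{
	(void)iova; (void)size; (void)pgsz;	/* entries cleared; no per-entry TLB op */
	ops.tlb_flush_all(NULL);
}

int main(void)
{
	const size_t sz = 64 * 1024, pgsz = 4096;

	tlb_op_count = 0;
	unmap_range_per_entry(0x100000, sz, pgsz);
	printf("per-entry maintenance: %lu TLB ops\n", tlb_op_count);

	tlb_op_count = 0;
	unmap_range_flush_all(0x100000, sz, pgsz);
	printf("flush-all at end:      %lu TLB ops\n", tlb_op_count);
	return 0;
}

For a 64K buffer mapped as 4K pages, the per-entry path makes 17 callback invocations (16 invalidations plus a sync) where the flush-all path makes one; that is the kind of saving behind the ~10% figure quoted above.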
@@ -452,8 +452,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
 	*ptep = table;
 	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
-	iova &= ~(blk_size - 1);
-	tlb->tlb_add_flush(iova, blk_size, true, cookie);
 	return size;
 }
 
@@ -480,12 +478,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, false, cookie);
-			tlb->tlb_sync(data->iop.cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
-		} else {
-			tlb->tlb_add_flush(iova, size, true, cookie);
 		}
 
 		return size;
@@ -515,7 +509,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 
 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		iop->cfg.tlb->tlb_flush_all(iop->cookie);
 
 	return unmapped;
 }