From a75bc6aef54d6dfb60814fd7c1ce908317da57b7 Mon Sep 17 00:00:00 2001
From: Mitchel Humpherys
Date: Fri, 24 Apr 2015 17:10:59 -0700
Subject: [PATCH] iommu: io-pgtable-arm: Flush all tlbs at end of unmap

Rather than calling the tlb maintenance routines throughout the course
of the unmap operation, just flush the entire tlb for the context in
question all at once, at the very end of the unmap. This greatly
improves performance for large page tables (which is common for large
buffers in a heavily fragmented system). In my testing, this
optimization gave a ~10% speedup when unmapping 64K.

Change-Id: Iaa2b211e730dad6bd9235ef98dd2a89cf541e663
Signed-off-by: Mitchel Humpherys
---
 drivers/iommu/io-pgtable-arm.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 97c3b8172ac3..c04e201c15f3 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -452,8 +452,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 
 	*ptep = table;
 	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
-	iova &= ~(blk_size - 1);
-	tlb->tlb_add_flush(iova, blk_size, true, cookie);
 	return size;
 }
 
@@ -480,12 +478,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, false, cookie);
-			tlb->tlb_sync(data->iop.cookie);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
-		} else {
-			tlb->tlb_add_flush(iova, size, true, cookie);
 		}
 		return size;
 	}
@@ -515,7 +509,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 
 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		iop->cfg.tlb->tlb_flush_all(iop->cookie);
 
 	return unmapped;
 }
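
A minimal, self-contained sketch of the idea behind the change (plain userspace C, not kernel code): clear all the page-table entries first, then invalidate the TLB for the whole context once at the end, instead of issuing a per-entry tlb_add_flush plus tlb_sync while walking the tables. The fake_tlb_*/toy_unmap_* names below are hypothetical stand-ins for the tlb_add_flush/tlb_sync/tlb_flush_all callbacks that io-pgtable-arm reaches through io_pgtable_cfg; the counters only illustrate how many maintenance operations each strategy issues.

/*
 * Standalone sketch: per-entry TLB maintenance vs. one flush-all at the
 * end of an unmap. Compile with any C compiler; no kernel headers used.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_PTES		512		/* entries in the toy table */
#define PAGE_SIZE	4096u

static uint64_t pgtable[NR_PTES];	/* toy single-level page table */
static unsigned long tlb_ops;		/* number of TLB maintenance calls */

static void fake_tlb_add_flush(uint64_t iova, size_t size)
{
	(void)iova;
	(void)size;
	tlb_ops++;			/* queue invalidation for one range */
}

static void fake_tlb_sync(void)
{
	tlb_ops++;			/* wait for queued invalidations */
}

static void fake_tlb_flush_all(void)
{
	tlb_ops++;			/* invalidate the whole context */
}

/* Old style: invalidate each mapping as it is torn down. */
static size_t toy_unmap_per_entry(uint64_t iova, size_t size)
{
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t idx = (iova + unmapped) / PAGE_SIZE % NR_PTES;

		pgtable[idx] = 0;	/* clear the PTE */
		fake_tlb_add_flush(iova + unmapped, PAGE_SIZE);
		unmapped += PAGE_SIZE;
	}
	fake_tlb_sync();
	return unmapped;
}

/* New style: clear everything, then one flush for the whole context. */
static size_t toy_unmap_flush_all(uint64_t iova, size_t size)
{
	size_t unmapped = 0;

	while (unmapped < size) {
		size_t idx = (iova + unmapped) / PAGE_SIZE % NR_PTES;

		pgtable[idx] = 0;	/* clear the PTE, no per-entry flush */
		unmapped += PAGE_SIZE;
	}
	if (unmapped)
		fake_tlb_flush_all();	/* mirrors the new arm_lpae_unmap() tail */
	return unmapped;
}

int main(void)
{
	size_t size = 64 * 1024;	/* a 64K unmap, as in the commit message */

	memset(pgtable, 0xff, sizeof(pgtable));
	tlb_ops = 0;
	toy_unmap_per_entry(0, size);
	printf("per-entry flush: %lu TLB ops\n", tlb_ops);

	memset(pgtable, 0xff, sizeof(pgtable));
	tlb_ops = 0;
	toy_unmap_flush_all(0, size);
	printf("flush-all:       %lu TLB ops\n", tlb_ops);

	return 0;
}

The tradeoff, and the reason this helps most for large, fragmented page tables, is over-invalidation: tlb_flush_all also drops translations for mappings the unmap never touched, in exchange for a single maintenance operation instead of one per entry.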