AMD IOMMU: add branch hints to completion wait checks
This patch adds branch hints to the checks for whether a completion_wait is necessary. The completion_waits in the mapping paths are unlikely because they will only happen on software implementations of AMD IOMMU, which don't exist today, or with lazy IO/TLB flushing when the allocator wraps around the address space. With lazy IO/TLB flushing, the completion_wait in the unmapping path is unlikely too.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
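For context, unlikely() is the kernel's branch-prediction hint. Below is a minimal sketch of the idea, simplified from the definitions in include/linux/compiler.h; maybe_sync() is a made-up illustration, not kernel code:

	/* Simplified sketch of the kernel's branch-hint macros
	 * (the real ones live in include/linux/compiler.h). */
	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	/* Hypothetical example: the hint tells the compiler that
	 * need_sync is rarely set, so the common path falls straight
	 * through without a taken branch. */
	static int maybe_sync(int need_sync)
	{
		if (unlikely(need_sync))
			return 1;	/* cold path: would wait for completion */
		return 0;		/* hot path: no wait needed */
	}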
parent 1c65577398
commit 5507eef835
1 changed file with 6 additions and 6 deletions
@@ -876,7 +876,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -905,7 +905,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -968,7 +968,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 			goto unmap;
 	}
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -1014,7 +1014,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1061,7 +1061,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 out:
@@ -1093,7 +1093,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (iommu->need_sync)
+	if (unlikely(iommu->need_sync))
 		iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);