iommu/io-pgtable-arm: Don't use dma_to_phys()
In checking whether DMA addresses differ from physical addresses, using dma_to_phys() is actually the wrong thing to do, since it may hide any DMA offset, which is precisely one of the things we are checking for. Simply casting between the two address types, whilst ugly, is in fact the appropriate course of action. Further care (and ugliness) is also necessary in the comparison to avoid truncation if phys_addr_t and dma_addr_t differ in size. We can also reject any device with a fixed DMA offset up-front at page table creation, leaving the allocation-time check for the more subtle cases like bounce buffering due to an incorrect DMA mask. Furthermore, we can then fix the hackish Kconfig dependency so that architectures without a dma_to_phys() implementation may still COMPILE_TEST (or even use!) the code. The true dependency is on the DMA API, so use the appropriate symbol for that. Change-Id: I2f7087d43e2d8f16ea36f8e10530d0c4811a4fcd Signed-off-by: Robin Murphy <robin.murphy@arm.com> [will: folded in selftest fix from Yong Wu] Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
parent
fc1e02aa45
commit
aeae109c4a
2 changed files with 15 additions and 14 deletions
|
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE
|
||||||
config IOMMU_IO_PGTABLE_LPAE
|
config IOMMU_IO_PGTABLE_LPAE
|
||||||
bool "ARMv7/v8 Long Descriptor Format"
|
bool "ARMv7/v8 Long Descriptor Format"
|
||||||
select IOMMU_IO_PGTABLE
|
select IOMMU_IO_PGTABLE
|
||||||
# SWIOTLB guarantees a dma_to_phys() implementation
|
depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
|
||||||
depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
|
|
||||||
help
|
help
|
||||||
Enable support for the ARM long descriptor pagetable format.
|
Enable support for the ARM long descriptor pagetable format.
|
||||||
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
|
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
|
||||||
|
|
|
@@ -280,9 +280,9 @@ static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
|
||||||
|
|
||||||
static bool suppress_map_failures;
|
static bool suppress_map_failures;
|
||||||
|
|
||||||
static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
|
static dma_addr_t __arm_lpae_dma_addr(void *pages)
|
||||||
{
|
{
|
||||||
return phys_to_dma(dev, virt_to_phys(pages));
|
return (dma_addr_t)virt_to_phys(pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
||||||
|
@@ -303,10 +303,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
||||||
goto out_free;
|
goto out_free;
|
||||||
/*
|
/*
|
||||||
* We depend on the IOMMU being able to work with any physical
|
* We depend on the IOMMU being able to work with any physical
|
||||||
* address directly, so if the DMA layer suggests it can't by
|
* address directly, so if the DMA layer suggests otherwise by
|
||||||
* giving us back some translation, that bodes very badly...
|
* translating or truncating them, that bodes very badly...
|
||||||
*/
|
*/
|
||||||
if (dma != __arm_lpae_dma_addr(dev, pages))
|
if (dma != virt_to_phys(pages))
|
||||||
goto out_unmap;
|
goto out_unmap;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -323,23 +323,20 @@ out_free:
|
||||||
static void __arm_lpae_free_pages(void *pages, size_t size,
|
static void __arm_lpae_free_pages(void *pages, size_t size,
|
||||||
struct io_pgtable_cfg *cfg, void *cookie)
|
struct io_pgtable_cfg *cfg, void *cookie)
|
||||||
{
|
{
|
||||||
struct device *dev = cfg->iommu_dev;
|
|
||||||
|
|
||||||
if (!selftest_running)
|
if (!selftest_running)
|
||||||
dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
|
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
|
||||||
size, DMA_TO_DEVICE);
|
size, DMA_TO_DEVICE);
|
||||||
io_pgtable_free_pages_exact(cfg, cookie, pages, size);
|
io_pgtable_free_pages_exact(cfg, cookie, pages, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
|
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
|
||||||
struct io_pgtable_cfg *cfg)
|
struct io_pgtable_cfg *cfg)
|
||||||
{
|
{
|
||||||
struct device *dev = cfg->iommu_dev;
|
|
||||||
|
|
||||||
*ptep = pte;
|
*ptep = pte;
|
||||||
|
|
||||||
if (!selftest_running)
|
if (!selftest_running)
|
||||||
dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
|
dma_sync_single_for_device(cfg->iommu_dev,
|
||||||
|
__arm_lpae_dma_addr(ptep),
|
||||||
sizeof(pte), DMA_TO_DEVICE);
|
sizeof(pte), DMA_TO_DEVICE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -894,6 +891,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
|
||||||
if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
|
if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
|
||||||
|
dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||||
if (!data)
|
if (!data)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
Loading…
Add table
Reference in a new issue