From bc13d86d3dd14d2868776ea8ad9f352ab124b4fc Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Fri, 2 Dec 2016 10:31:07 -0800
Subject: [PATCH] arm: dma-mapping: handle IOVA address zero

The IOVA allocator used by these calls supports IOVA address zero,
so handle a zero IOVA properly by removing the early-return checks
that treated it as an invalid address.

Change-Id: I012452d4cf3534dfb79e6deb15b7ff74f5e3bb40
Signed-off-by: Liam Mark
---
 arch/arm/mm/dma-mapping.c   | 12 ------------
 arch/arm64/mm/dma-mapping.c |  9 ---------
 2 files changed, 21 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 80c4c50814d8..723e3925dc84 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1931,9 +1931,6 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
 }
@@ -1957,9 +1954,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
@@ -1975,9 +1969,6 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1989,9 +1980,6 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index df083e9350c4..78319858f734 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1796,9 +1796,6 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	if (!(is_device_dma_coherent(dev) ||
 		dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
@@ -1816,9 +1813,6 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(
 					mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
-	if (!iova)
-		return;
-
 	if (!is_device_dma_coherent(dev))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
@@ -1832,9 +1826,6 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	struct page *page = phys_to_page(iommu_iova_to_phys(
 					mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
-	if (!iova)
-		return;
-
 	if (!is_device_dma_coherent(dev))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
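
Below the diff, a minimal self-contained C sketch of the failure mode the removed
checks created, assuming (as in kernels of this era) that a failed IOMMU mapping is
reported through an all-ones DMA_ERROR_CODE-style sentinel rather than through
address zero. The allocator and unmap helpers here (alloc_iova_example(),
unmap_old(), unmap_new()) are hypothetical stand-ins for illustration, not the
kernel functions touched by this patch:

	/*
	 * Hypothetical illustration only: once the IOVA allocator may hand out
	 * address zero as a valid allocation, treating iova == 0 as "nothing
	 * to do" silently skips the unmap and leaks that mapping.  Failure is
	 * assumed to be signalled by the all-ones error sentinel instead.
	 */
	#include <stdio.h>

	typedef unsigned long long dma_addr_t;
	#define DMA_ERROR_CODE	(~(dma_addr_t)0)	/* assumed failure sentinel */

	/* Toy allocator: the very first allocation is IOVA 0. */
	static dma_addr_t alloc_iova_example(void)
	{
		static dma_addr_t next;
		return next++ << 12;		/* 0x0, 0x1000, 0x2000, ... */
	}

	static void unmap_old(dma_addr_t iova)
	{
		if (!iova)
			return;			/* bug: IOVA 0 is never unmapped */
		printf("unmapped %llx\n", (unsigned long long)iova);
	}

	static void unmap_new(dma_addr_t iova)
	{
		if (iova == DMA_ERROR_CODE)
			return;			/* skip only failed mappings */
		printf("unmapped %llx\n", (unsigned long long)iova);
	}

	int main(void)
	{
		dma_addr_t iova = alloc_iova_example();	/* returns 0 */

		unmap_old(iova);	/* prints nothing: the IOVA 0 mapping is leaked */
		unmap_new(iova);	/* prints "unmapped 0": the mapping is released */
		return 0;
	}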