arm: dma-mapping: map_page map to nearest page
Since the page offset can be greater than the size of a page, fix
arm_coherent_iommu_map_page so that it maps to the nearest page
boundary. This both prevents unnecessarily mapping memory we don't
need to map and fixes a bug where the unmap wasn't unmapping this
extra memory.

Change-Id: Iaa69aff7505ee75d1f2e69bb0cda814bc6211bd3
Signed-off-by: Liam Mark <lmark@codeaurora.org>
parent 20e1ed9259
commit 45cb559804

2 changed files with 16 additions and 7 deletions
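For illustration only (not part of the commit): a minimal userspace sketch of the offset arithmetic the diff below introduces, using local stand-ins for the kernel's PAGE_SIZE/PAGE_MASK/PAGE_ALIGN macros and assuming 4 KiB pages. It shows how an offset larger than one page is split into a page-aligned start_offset (added to the physical address passed to iommu_map) and an intra-page map_offset (added to the returned IOVA), so only the pages actually needed get mapped; the concrete values of offset and size are made up for the example.

```c
#include <stdio.h>

/* Local stand-ins for the kernel macros, assuming 4 KiB pages. */
#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical example: an offset larger than one page, as the
	 * commit message describes. */
	unsigned long offset = PAGE_SIZE + 100;	/* 4196 bytes into the buffer */
	unsigned long size   = 200;		/* bytes the caller asked to map */

	/* Old behaviour: round up size + offset and map from the start of
	 * the page, covering more memory than necessary. */
	unsigned long old_len = PAGE_ALIGN(size + offset);	/* 8192 (two pages) */

	/* New behaviour: the page-aligned part of the offset goes into the
	 * physical address, the remainder into the returned IOVA. */
	unsigned long map_offset   = offset & ~PAGE_MASK;	/* 100  */
	unsigned long start_offset = offset & PAGE_MASK;	/* 4096 */
	unsigned long new_len = PAGE_ALIGN(map_offset + size);	/* 4096 (one page) */

	printf("old len: %lu bytes\n", old_len);
	printf("start_offset: %lu, map_offset: %lu, new len: %lu bytes\n",
	       start_offset, map_offset, new_len);
	return 0;
}
```

With these example values the old computation maps two pages while the new one maps a single page, which is the saving the commit message refers to.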
@@ -1875,7 +1875,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1883,11 +1887,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 
 	prot = __dma_direction_to_prot(dir);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;

@@ -1756,7 +1756,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1766,12 +1770,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
 	prot = __get_iommu_pgprot(attrs, prot,
 				  is_dma_coherent(dev, attrs));
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;