arm64: dma-mapping: Split large pages when allocating in atomic context

In atomic context, gen_pool_alloc returns a single contiguous allocation
large enough to accommodate the requested size. However,
__iommu_create_mapping always maps pages assuming they are 4K in size,
so only the first 4K of the buffer is mapped and a translation fault is
generated during the unmap.

Fix this by splitting the larger allocation into 4K pages before mapping.

Change-Id: Ifcbe29477ad210204028486bd011470fe8b50852
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
[pdaly@codeaurora.org Keep upstream version of alloc_from_pool]
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Author: Neeti Desai, 2015-05-06 15:38:34 -07:00 (committed by David Keitel)
commit c9eb16e60f
parent 1b458c04b4

@@ -1389,20 +1389,38 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
 				  dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
 	void *addr;
 
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	if (!pages)
+		return NULL;
+
 	addr = __alloc_from_pool(size, &page, gfp);
 	if (!addr)
-		return NULL;
+		goto err_free;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	for (i = 0; i < count ; i++)
+		pages[i] = page + i;
+
+	*handle = __iommu_create_mapping(dev, pages, size);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
+	kvfree(pages);
 	return addr;
 
 err_mapping:
 	__free_from_pool(addr, size);
+err_free:
+	kvfree(pages);
 	return NULL;
 }
 
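
For readers who want to see the page-splitting idea in isolation, below is a
minimal user-space sketch of the same pattern. It is not the kernel API:
page_desc, create_mapping and split_into_pages are hypothetical stand-ins for
struct page, __iommu_create_mapping and the loop added in the patch, with
PAGE_SIZE fixed at 4K. It illustrates that a mapper which walks an array of
per-page descriptors only covers the whole contiguous buffer if every 4K page
of that buffer is described, which is exactly what the added for-loop does.

/*
 * Minimal user-space model of the fix.  Hypothetical names only
 * (page_desc, create_mapping, split_into_pages); this is not the
 * kernel API, just the shape of the page-splitting loop.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page_desc {
	void *vaddr;	/* start of one 4K page */
};

/* Maps exactly one 4K entry per array element, the way
 * __iommu_create_mapping walks its page array. */
static size_t create_mapping(const struct page_desc *pages, size_t count)
{
	size_t mapped = 0;

	for (size_t i = 0; i < count; i++) {
		printf("map 4K page at %p\n", pages[i].vaddr);
		mapped += PAGE_SIZE;
	}
	return mapped;
}

/* Split one contiguous buffer into per-page descriptors,
 * mirroring the "pages[i] = page + i" loop in the patch. */
static struct page_desc *split_into_pages(void *buf, size_t size, size_t *count)
{
	*count = size / PAGE_SIZE;
	struct page_desc *pages = calloc(*count, sizeof(*pages));

	if (!pages)
		return NULL;
	for (size_t i = 0; i < *count; i++)
		pages[i].vaddr = (char *)buf + i * PAGE_SIZE;
	return pages;
}

int main(void)
{
	size_t size = 4 * PAGE_SIZE;	/* contiguous 16K, like the pool allocation */
	void *buf = aligned_alloc(PAGE_SIZE, size);
	size_t count;
	struct page_desc *pages;

	if (!buf)
		return 1;

	pages = split_into_pages(buf, size, &count);
	if (!pages) {
		free(buf);
		return 1;
	}

	/* With only the first page described, create_mapping would cover
	 * 4K of the 16K buffer; with the full array it covers all of it. */
	printf("mapped %zu of %zu bytes\n", create_mapping(pages, count), size);

	free(pages);
	free(buf);
	return 0;
}

As in the patch, the descriptor array is only needed while the mapping is
created and can be freed immediately afterwards, which is why the kernel
change frees pages with kvfree on both the success and error paths.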