ion: Fix DMA operations for ARM64

The ion API requires:

"The implicit contract here is that memory coming from the heaps is
ready for dma, ie if it has a cached mapping that mapping has been
invalidated"

In v4.4, passing device == NULL to any DMA operation on arm64 results in
a no-op. Ensure that proper device pointers are used.

Change-Id: Id354f7cf6979aa58621408cfcfbd8ef62015fdbd
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
commit 2d4fdc7bde (parent f668e1e3d0)
Patrick Daly authored 2016-05-26 18:03:28 -07:00; committed by Kyle Yan
7 changed files with 49 additions and 24 deletions
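
The change applies one pattern throughout: each heap picks up the struct device
that was stashed in its platform-heap priv field when the heap was created and
hands it to ion_pages_sync_for_device()/dma_sync_*() instead of NULL. A minimal
sketch of that pattern, with illustrative names (example_*) that are not part of
this patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: a heap that remembers the device it was created for. */
struct example_heap {
	struct device *dev;	/* taken from ion_platform_heap->priv */
};

static void example_make_buffer_dma_ready(struct example_heap *heap,
					   struct sg_table *table)
{
	/*
	 * On arm64 in v4.4 a NULL device turns this call into a no-op, so a
	 * cached mapping of the buffer would never be cleaned/invalidated and
	 * the "ready for dma" contract quoted above would be broken.
	 */
	dma_sync_sg_for_device(heap->dev, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);
}

In the hunks below the pointer comes from heap_data->priv (or heap->priv once
the heap is registered), and ion_page_pool now caches it as pool->dev so the
page pools can pass it along as well.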

@@ -1118,6 +1118,8 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page,
 {
 	struct scatterlist sg;
 
+	WARN_ONCE(!dev, "A device is required for dma_sync\n");
+
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, page, size, 0);
 	/*

@@ -111,12 +111,14 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	struct ion_heap *heap = buffer->heap;
 	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
+	struct device *dev = heap->priv;
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);
 
 	ion_carveout_free(heap, paddr, buffer->size);
@@ -153,11 +155,12 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)

@@ -99,13 +99,14 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
+	struct device *dev = heap->priv;
 
 	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
				       DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
@@ -144,11 +145,12 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	int ret;
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)

@@ -35,7 +35,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 		return NULL;
 
 	if (pool->gfp_mask & __GFP_ZERO)
-		if (msm_ion_heap_high_order_page_zero(page, pool->order))
+		if (msm_ion_heap_high_order_page_zero(pool->dev, page,
+						      pool->order))
 			goto error_free_pages;
 
 	ion_page_pool_alloc_set_cache_policy(pool, page);
@@ -222,12 +223,14 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 	return ion_page_pool_total(pool, high);
 }
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order)
 {
 	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
					     GFP_KERNEL);
 	if (!pool)
 		return NULL;
+	pool->dev = dev;
 	pool->high_count = 0;
 	pool->low_count = 0;
 	pool->nr_unreserved = 0;

@@ -262,9 +262,11 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-int msm_ion_heap_high_order_page_zero(struct page *page, int order);
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order);
 struct ion_heap *get_ion_heap(int heap_id);
-int msm_ion_heap_sg_table_zero(struct sg_table *, size_t size);
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *,
+			       size_t size);
 int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
 int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
 void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
@@ -435,12 +437,14 @@ struct ion_page_pool {
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
+	struct device *dev;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
 void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
 void *ion_page_pool_alloc_pool_only(struct ion_page_pool *);

@@ -82,6 +82,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 	struct page *page;
 	struct ion_page_pool *pool;
 	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->heap.priv;
 
 	if (*from_pool) {
 		if (vmid > 0)
@@ -100,6 +101,8 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 		if (order)
 			gfp_mask = high_order_gfp_flags;
 		page = alloc_pages(gfp_mask, order);
+		ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
+					  DMA_BIDIRECTIONAL);
 	}
 	if (!page)
 		return 0;
@@ -222,6 +225,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	struct pages_mem data;
 	unsigned int sz;
 	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
 	if (align > PAGE_SIZE)
 		return -EINVAL;
@@ -311,7 +315,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	}
 
 	if (nents_sync) {
-		dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
+		dma_sync_sg_for_device(dev, table_sync.sgl, table_sync.nents,
				       DMA_BIDIRECTIONAL);
 		if (vmid > 0) {
 			ret = ion_system_secure_heap_assign_sg(&table_sync,
@@ -368,11 +372,12 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 	LIST_HEAD(pages);
 	int i;
 	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
 	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
 	    !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
 		if (vmid < 0)
-			msm_ion_heap_sg_table_zero(table, buffer->size);
+			msm_ion_heap_sg_table_zero(dev, table, buffer->size);
 	} else if (vmid > 0) {
 		if (ion_system_secure_heap_unassign_sg(table, vmid))
 			return;
@@ -627,7 +632,8 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
  * nothing. If it succeeds you'll eventually need to use
  * ion_system_heap_destroy_pools to destroy the pools.
  */
-static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+static int ion_system_heap_create_pools(struct device *dev,
+					struct ion_page_pool **pools)
 {
 	int i;
 	for (i = 0; i < num_orders; i++) {
@@ -636,7 +642,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
 		if (orders[i])
 			gfp_flags = high_order_gfp_flags;
 
-		pool = ion_page_pool_create(gfp_flags, orders[i]);
+		pool = ion_page_pool_create(dev, gfp_flags, orders[i]);
 		if (!pool)
 			goto err_create_pool;
 		pools[i] = pool;
@@ -647,11 +653,12 @@ err_create_pool:
 	return 1;
 }
 
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 {
 	struct ion_system_heap *heap;
 	int i;
 	int pools_size = sizeof(struct ion_page_pool *) * num_orders;
+	struct device *dev = data->priv;
 
 	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
 	if (!heap)
@@ -673,15 +680,16 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
			heap->secure_pools[i] = kzalloc(pools_size, GFP_KERNEL);
			if (!heap->secure_pools[i])
				goto err_create_secure_pools;
-			if (ion_system_heap_create_pools(heap->secure_pools[i]))
+			if (ion_system_heap_create_pools(
+					dev, heap->secure_pools[i]))
				goto err_create_secure_pools;
		}
	}
 
-	if (ion_system_heap_create_pools(heap->uncached_pools))
+	if (ion_system_heap_create_pools(dev, heap->uncached_pools))
		goto err_create_uncached_pools;
 
-	if (ion_system_heap_create_pools(heap->cached_pools))
+	if (ion_system_heap_create_pools(dev, heap->cached_pools))
		goto err_create_cached_pools;
 
	heap->heap.debug_show = ion_system_heap_debug_show;
@@ -738,6 +746,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	struct sg_table *table;
 	unsigned long i;
 	int ret;
+	struct device *dev = heap->priv;
 
 	if (align > (PAGE_SIZE << order))
 		return -EINVAL;
@@ -766,7 +775,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 
 	buffer->priv_virt = table;
 
-	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, len, DMA_BIDIRECTIONAL);
 
 	return 0;

@@ -859,7 +859,8 @@ void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
 	pages_mem->free_fn(pages_mem->pages);
 }
 
-int msm_ion_heap_high_order_page_zero(struct page *page, int order)
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order)
 {
 	int i, ret;
 	struct pages_mem pages_mem;
@@ -873,13 +874,14 @@ int msm_ion_heap_high_order_page_zero(struct page *page, int order)
 		pages_mem.pages[i] = page + i;
 
 	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
-	dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
+	dma_sync_single_for_device(dev, page_to_phys(page), pages_mem.size,
				   DMA_BIDIRECTIONAL);
 	msm_ion_heap_free_pages_mem(&pages_mem);
 	return ret;
 }
 
-int msm_ion_heap_sg_table_zero(struct sg_table *table, size_t size)
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *table,
+			       size_t size)
 {
 	struct scatterlist *sg;
 	int i, j, ret = 0, npages = 0;
@@ -901,7 +903,7 @@ int msm_ion_heap_sg_table_zero(struct sg_table *table, size_t size)
 	}
 
 	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
-	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+	dma_sync_sg_for_device(dev, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);
 	msm_ion_heap_free_pages_mem(&pages_mem);
 	return ret;