ion: Drop cache operation after zeroing

At one time, the zeroing algorithm used a mixture of
cached and uncached mappings which necessitated an extra
cache operation afterwards. The zeroing algorithm has been
simplified so the cache operation is no longer required if
the mapping used for zeroing matches the cache status of
the buffer (cached or not). Remove the cache operation and
use appropriate page protections instead.
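In essence, the patch maps the pages for zeroing with a kernel page protection that matches the buffer's own cacheability, so the zeroed data is never left stranded in an incoherent cache and no dma_sync_*() call is needed afterwards. A minimal sketch of the idea, assuming a simplified helper (the name zero_buffer_pages and the single vmap() of the whole buffer are illustrative; the real msm_ion_heap_pages_zero() batches and retries its vmap() calls):

/*
 * Sketch: zero pages through a temporary kernel mapping whose
 * cacheability matches the buffer's ION_FLAG_CACHED attribute.
 * Because the mapping attributes match, no cache maintenance is
 * required after the memset() -- which is the operation this
 * patch removes.
 */
static int zero_buffer_pages(struct page **pages, int num_pages, bool cached)
{
	pgprot_t pgprot = cached ? PAGE_KERNEL :
				   pgprot_writecombine(PAGE_KERNEL);
	void *ptr = vmap(pages, num_pages, VM_MAP, pgprot);

	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, (size_t)num_pages * PAGE_SIZE);
	vunmap(ptr);
	return 0;
}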

Change-Id: I7d3596ed503811818782e77a0f3f7f63b1456d70
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Laura Abbott authored 2014-09-05 13:24:15 -07:00; committed by David Keitel
parent 327b1e3da1
commit f29d7e7e0e
4 changed files with 33 additions and 16 deletions


@@ -41,7 +41,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 					DMA_BIDIRECTIONAL);
 	if (pool->gfp_mask & __GFP_ZERO) {
-		if (msm_ion_heap_high_order_page_zero(page, pool->order))
+		if (msm_ion_heap_high_order_page_zero(page, pool->order,
+						      pool->cached))
 			goto error_free_pages;
 	}


@@ -258,9 +258,10 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-int msm_ion_heap_high_order_page_zero(struct page *page, int order);
+int msm_ion_heap_high_order_page_zero(struct page *page, int order,
+				      bool cached);
 int msm_ion_heap_buffer_zero(struct ion_buffer *buffer);
-int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages, pgprot_t prot);
 int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
 void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
@@ -415,6 +416,7 @@ struct ion_page_pool {
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
+	bool cached;
 };
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);


@@ -192,6 +192,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	unsigned int max_order = orders[0];
 	struct pages_mem data;
 	unsigned int sz;
+	pgprot_t pgprot = buffer->flags & ION_FLAG_CACHED ? PAGE_KERNEL :
+				pgprot_writecombine(PAGE_KERNEL);
 	if (align > PAGE_SIZE)
 		return -EINVAL;
@@ -274,7 +276,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	} while (sg);
-	ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+	ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT,
+				      pgprot);
 	if (ret) {
 		pr_err("Unable to zero pages\n");
 		goto err_free_sg2;
@@ -440,7 +443,8 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
  * nothing. If it succeeds you'll eventually need to use
  * ion_system_heap_destroy_pools to destroy the pools.
  */
-static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+					bool cached)
 {
 	int i;
 	for (i = 0; i < num_orders; i++) {
@@ -452,6 +456,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
 		pool = ion_page_pool_create(gfp_flags, orders[i]);
 		if (!pool)
 			goto err_create_pool;
+		pool->cached = cached;
 		pools[i] = pool;
 	}
 	return 0;
@@ -480,10 +485,10 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 	if (!heap->cached_pools)
 		goto err_alloc_cached_pools;
-	if (ion_system_heap_create_pools(heap->uncached_pools))
+	if (ion_system_heap_create_pools(heap->uncached_pools, false))
 		goto err_create_uncached_pools;
-	if (ion_system_heap_create_pools(heap->cached_pools))
+	if (ion_system_heap_create_pools(heap->cached_pools, true))
 		goto err_create_cached_pools;
 	heap->heap.debug_show = ion_system_heap_debug_show;


@@ -708,7 +708,7 @@ long msm_ion_custom_ioctl(struct ion_client *client,
  * and thus caller is responsible for handling any cache maintenance
  * operations needed.
  */
-int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages, pgprot_t pgprot)
 {
 	int i, j, npages_to_vmap;
 	void *ptr = NULL;
@@ -727,7 +727,7 @@ int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
 		for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
 			++j) {
 			ptr = vmap(&pages[i], npages_to_vmap,
-					VM_IOREMAP, PAGE_KERNEL);
+					VM_IOREMAP, pgprot);
 			if (ptr)
 				break;
 			else
@@ -778,22 +778,27 @@ void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
 		pages_mem->free_fn(pages_mem->pages);
 }
-int msm_ion_heap_high_order_page_zero(struct page *page, int order)
+int msm_ion_heap_high_order_page_zero(struct page *page, int order, bool cached)
 {
 	int i, ret;
 	struct pages_mem pages_mem;
 	int npages = 1 << order;
+	pgprot_t pgprot;
 	pages_mem.size = npages * PAGE_SIZE;
+	if (cached)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
 	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
 		return -ENOMEM;
 	for (i = 0; i < (1 << order); ++i)
 		pages_mem.pages[i] = page + i;
-	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
-	dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
-					DMA_BIDIRECTIONAL);
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages, pgprot);
 	msm_ion_heap_free_pages_mem(&pages_mem);
 	return ret;
 }
@@ -804,6 +809,12 @@ int msm_ion_heap_buffer_zero(struct ion_buffer *buffer)
 	struct scatterlist *sg;
 	int i, j, ret = 0, npages = 0;
 	struct pages_mem pages_mem;
+	pgprot_t pgprot;
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
 	pages_mem.size = PAGE_ALIGN(buffer->size);
@@ -818,9 +829,7 @@ int msm_ion_heap_buffer_zero(struct ion_buffer *buffer)
 			pages_mem.pages[npages++] = page + j;
 	}
-	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
-	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
-				DMA_BIDIRECTIONAL);
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages, pgprot);
 	msm_ion_heap_free_pages_mem(&pages_mem);
 	return ret;
 }