msm: ion: fix ion cache maintenance issue

The ion cache maintenance code, while doing cache maintenance
on each sg entry, considers only a single page length, which is
not always correct: an sg node can describe a contiguous region
spanning more than one page.

Update the code to properly handle sg entries of any length.

Change-Id: Ied380fa32cbda5384bab8ec2625ceaf1010fd314
Signed-off-by: Shiraz Hashim <shashim@codeaurora.org>
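
The heart of the fix is a page-by-page walk that clamps each operation to the current page boundary. Below is a minimal user-space model of that walk; the `cache_op()` and `do_cache_ops()` helpers are hypothetical stand-ins for the kernel's dmac_*_range() primitives and __do_cache_ops(), and a 4 KiB PAGE_SIZE is assumed:

/* Model of the per-page walk the patch performs: split a
 * [offset, offset + length) range within a multi-page sg entry
 * into page-sized chunks. */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Stand-in for dmac_clean_range()/dmac_inv_range()/dmac_flush_range(). */
static void cache_op(unsigned long pfn, unsigned int off, unsigned int len)
{
	printf("op on pfn %lu: bytes [%u, %u)\n", pfn, off, off + len);
}

static void do_cache_ops(unsigned long pfn, unsigned int offset,
			 unsigned int length)
{
	unsigned int left = length;

	/* Fold whole pages of the offset into the pfn, as the patch does. */
	pfn += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		unsigned int len = left;

		/* Clamp to the end of the current page. */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cache_op(pfn, offset, len);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* A 10000-byte range starting 100 bytes into page 42 spans
	 * three pages; the pre-fix code would touch only one. */
	do_cache_ops(42, 100, 10000);
	return 0;
}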
@@ -12,6 +12,7 @@
  */
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/msm_ion.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -231,6 +232,71 @@ static int ion_no_pages_cache_ops(struct ion_client *client,
 	return 0;
 }
 
+static void __do_cache_ops(struct page *page, unsigned int offset,
+		unsigned int length, void (*op)(const void *, const void *))
+{
+	unsigned int left = length;
+	unsigned long pfn;
+	unsigned long nr_pages;
+	void *vaddr;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	page = pfn_to_page(pfn);
+	offset &= ~PAGE_MASK;
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		op(vaddr, vaddr + length);
+		goto out;
+	}
+
+	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+	if (nr_pages > 1) {
+		struct vm_struct *area;
+		unsigned long addr;
+
+		area = get_vm_area_caller((nr_pages << PAGE_SHIFT), VM_IOREMAP,
+					__builtin_return_address(0));
+		if (!area)
+			goto perpage;
+
+		addr = (unsigned long)area->addr;
+		area->phys_addr = __pfn_to_phys(pfn);
+
+		if (ioremap_page_range(addr, addr + length, area->phys_addr,
+					PAGE_KERNEL)) {
+			vunmap(area->addr);
+			goto perpage;
+		}
+
+		op(area->addr + offset, area->addr + offset + length);
+
+		unmap_kernel_range_noflush(addr, area->size);
+		vunmap(area->addr);
+
+		goto out;
+	}
+
+perpage:
+	do {
+		unsigned int len;
+
+		len = left;
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		page = pfn_to_page(pfn);
+		vaddr = kmap_atomic(page);
+		op(vaddr + offset, vaddr + offset + len);
+		kunmap_atomic(vaddr);
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+
+out:
+	return;
+}
+
 static int ion_pages_cache_ops(struct ion_client *client,
 			struct ion_handle *handle,
 			void *vaddr, unsigned int offset, unsigned int length,
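Two mapping strategies appear in the new __do_cache_ops() above: lowmem pages are operated on directly through page_address(), while a highmem run longer than one page is temporarily made virtually contiguous with get_vm_area_caller() plus ioremap_page_range() so a single range operation can cover it. If that mapping fails, or the run fits in one page, the code falls back to a per-page kmap_atomic() walk, clamping each step to the page boundary.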
@@ -238,36 +304,43 @@ static int ion_pages_cache_ops(struct ion_client *client,
 {
 	struct sg_table *table = NULL;
 	struct scatterlist *sg;
-	struct page *page;
 	int i;
-	void *ptr;
+	unsigned int len = 0;
+	void (*op)(const void *, const void *);
 
 	table = ion_sg_table(client, handle);
 	if (IS_ERR_OR_NULL(table))
 		return PTR_ERR(table);
 
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		page = sg_page(sg);
-		if (PageHighMem(page))
-			ptr = kmap_atomic(page);
-		else
-			ptr = page_address(page);
-
-		switch (cmd) {
-		case ION_IOC_CLEAN_CACHES:
-			dmac_clean_range(ptr, ptr + sg->length);
-			break;
-		case ION_IOC_INV_CACHES:
-			dmac_inv_range(ptr, ptr + sg->length);
-			break;
-		case ION_IOC_CLEAN_INV_CACHES:
-			dmac_flush_range(ptr, ptr + sg->length);
-			break;
-		default:
-			return -EINVAL;
-		}
-		if (PageHighMem(page))
-			kunmap_atomic(ptr);
-	};
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		op = dmac_clean_range;
+		break;
+	case ION_IOC_INV_CACHES:
+		op = dmac_inv_range;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		op = dmac_flush_range;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (vaddr) {
+		op(vaddr + offset, vaddr + offset + length);
+		return 0;
+	}
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		len += sg->length;
+		if (len < offset)
+			continue;
+		__do_cache_ops(sg_page(sg), sg->offset, sg->length, op);
+		if (len > length + offset)
+			break;
+	}
+
 	return 0;
 }
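
To see how the reworked loop selects sg entries for a partial (offset, length) window into the buffer, here is a small standalone model of just that selection logic; the entry lengths are arbitrary test values and sg_op() is a hypothetical stand-in for the __do_cache_ops() call:

/* Model of the new for_each_sg() loop: accumulate entry lengths and
 * apply the op only to entries overlapping [offset, offset + length). */
#include <stdio.h>

static void sg_op(int i, unsigned int sg_len)
{
	printf("maintain sg[%d] (%u bytes)\n", i, sg_len);
}

int main(void)
{
	unsigned int sg_lengths[] = { 4096, 8192, 4096, 16384 };
	unsigned int offset = 6000, length = 7000;	/* requested window */
	unsigned int len = 0;
	int i;

	for (i = 0; i < 4; i++) {
		len += sg_lengths[i];
		if (len < offset)	/* entry ends before the window */
			continue;
		sg_op(i, sg_lengths[i]);
		if (len > length + offset)	/* window fully covered */
			break;
	}
	return 0;
}

With offset 6000 and length 7000 over entries of 4096/8192/4096/16384 bytes, sg[0] is skipped and sg[1] and sg[2] are maintained. Note the loop operates on whole sg entries, so maintenance is conservative at the window edges.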