msm: kgsl: Use per page cache operation instead of bulk cache operation

For any cache operation, the current code maps all of the buffer's
pages into the kernel with vmap() when no sg table is available, and
then performs the requested cache operation on that single mapping. If
vmap() fails because of a memory crunch, the ioctl simply returns
failure.
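
Condensed from the hunk below, the bulk path being removed looks like
this; the whole buffer has to fit into kernel virtual address space
before kgsl_do_cache_op() can run even once:

	addr = vmap(memdesc->pages, memdesc->page_count,
			VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
	if (addr == NULL)
		return -ENOMEM;	/* the failure the ioctl reports today */

	ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
	vunmap(addr);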

This change avoids vmap() entirely and performs the cache operation
page by page even when no sg table is available. This prevents vmap()
failures, which are especially likely on 32-bit systems where kernel
virtual address space is scarce.
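
Sketched here without the offset/size clipping that the full change
keeps, the new path builds a temporary sg table over the page array
and operates on one scatterlist entry at a time, so no kernel mapping
is needed:

	sgt = kgsl_alloc_sgt_from_pages(memdesc);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		ret = kgsl_do_cache_op(sg_page(sg), NULL, 0,
						sg->length, op);

	kgsl_free_sgt(sgt);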

Change-Id: I123b46e6a55a62cbf934ab6a2a49dcd1f0d4c7d4
Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>

@@ -623,6 +623,9 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 		uint64_t size, unsigned int op)
 {
 	void *addr = NULL;
+	struct sg_table *sgt = NULL;
+	struct scatterlist *sg;
+	unsigned int i, pos = 0;
 	int ret = 0;
 
 	if (size == 0 || size > UINT_MAX)
@@ -650,40 +653,38 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 	 * If the buffer is not mapped to the kernel, perform the cache
 	 * operation after mapping it to the kernel.
 	 */
-	if (memdesc->sgt != NULL) {
-		struct scatterlist *sg;
-		unsigned int i, pos = 0;
+	if (memdesc->sgt != NULL)
+		sgt = memdesc->sgt;
+	else {
+		if (memdesc->pages == NULL)
+			return ret;
 
-		for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
-			uint64_t sg_offset, sg_left;
-			if (offset >= (pos + sg->length)) {
-				pos += sg->length;
-				continue;
-			}
-			sg_offset = offset > pos ? offset - pos : 0;
-			sg_left = (sg->length - sg_offset > size) ? size :
-						sg->length - sg_offset;
-			ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
-								sg_left, op);
-			size -= sg_left;
-			if (size == 0)
-				break;
-			pos += sg->length;
-		}
-	} else if (memdesc->pages != NULL) {
-		addr = vmap(memdesc->pages, memdesc->page_count,
-				VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
-		if (addr == NULL)
-			return -ENOMEM;
-
-		/* Make sure the offset + size do not overflow the address */
-		if (addr + ((size_t) offset + (size_t) size) < addr)
-			return -ERANGE;
-
-		ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
-		vunmap(addr);
+		sgt = kgsl_alloc_sgt_from_pages(memdesc);
+		if (IS_ERR(sgt))
+			return PTR_ERR(sgt);
 	}
 
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		uint64_t sg_offset, sg_left;
+
+		if (offset >= (pos + sg->length)) {
+			pos += sg->length;
+			continue;
+		}
+		sg_offset = offset > pos ? offset - pos : 0;
+		sg_left = (sg->length - sg_offset > size) ? size :
+					sg->length - sg_offset;
+		ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
+							sg_left, op);
+		size -= sg_left;
+		if (size == 0)
+			break;
+		pos += sg->length;
+	}
+
+	if (memdesc->sgt == NULL)
+		kgsl_free_sgt(sgt);
+
 	return ret;
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
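
kgsl_alloc_sgt_from_pages() itself is outside this hunk. A minimal
sketch of what such a helper can look like, assuming it simply wraps
sg_alloc_table_from_pages() over memdesc->pages (the actual kgsl
helper may differ):

	/* Hypothetical helper body, not part of this diff. */
	static struct sg_table *
	kgsl_alloc_sgt_from_pages(struct kgsl_memdesc *memdesc)
	{
		struct sg_table *sgt;
		int ret;

		sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
		if (sgt == NULL)
			return ERR_PTR(-ENOMEM);

		/* Describe the discontiguous page array as an sg table */
		ret = sg_alloc_table_from_pages(sgt, memdesc->pages,
				memdesc->page_count, 0, memdesc->size,
				GFP_KERNEL);
		if (ret) {
			kfree(sgt);
			return ERR_PTR(ret);
		}
		return sgt;
	}

Allocating and freeing the table on every call costs a small
allocation, but unlike vmap() it does not consume kernel virtual
address space, so it cannot fail for the reason this change is fixing.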