msm: kgsl: Use per page cache operation instead of bulk cache operation
For any cache operation, the current code tries to map all pages to the kernel using vmap in case the sg table is not available, and then performs the requested cache operation. If vmap fails because of a memory crunch, the ioctl just returns failure. This change avoids using vmap and performs a per-page cache operation even when the sg table is not available. This is done to avoid failures caused by vmap, especially on 32-bit systems. Change-Id: I123b46e6a55a62cbf934ab6a2a49dcd1f0d4c7d4 Signed-off-by: Deepak Kumar <dkumar@codeaurora.org>
This commit is contained in:
parent
5404e35069
commit
7490d55401
1 changed file with 33 additions and 32 deletions
|
@ -623,6 +623,9 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
|
||||||
uint64_t size, unsigned int op)
|
uint64_t size, unsigned int op)
|
||||||
{
|
{
|
||||||
void *addr = NULL;
|
void *addr = NULL;
|
||||||
|
struct sg_table *sgt = NULL;
|
||||||
|
struct scatterlist *sg;
|
||||||
|
unsigned int i, pos = 0;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (size == 0 || size > UINT_MAX)
|
if (size == 0 || size > UINT_MAX)
|
||||||
|
@ -650,40 +653,38 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
|
||||||
* If the buffer is not to mapped to kernel, perform cache
|
* If the buffer is not to mapped to kernel, perform cache
|
||||||
* operations after mapping to kernel.
|
* operations after mapping to kernel.
|
||||||
*/
|
*/
|
||||||
if (memdesc->sgt != NULL) {
|
if (memdesc->sgt != NULL)
|
||||||
struct scatterlist *sg;
|
sgt = memdesc->sgt;
|
||||||
unsigned int i, pos = 0;
|
else {
|
||||||
|
if (memdesc->pages == NULL)
|
||||||
|
return ret;
|
||||||
|
|
||||||
for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
|
sgt = kgsl_alloc_sgt_from_pages(memdesc);
|
||||||
uint64_t sg_offset, sg_left;
|
if (IS_ERR(sgt))
|
||||||
|
return PTR_ERR(sgt);
|
||||||
if (offset >= (pos + sg->length)) {
|
|
||||||
pos += sg->length;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
sg_offset = offset > pos ? offset - pos : 0;
|
|
||||||
sg_left = (sg->length - sg_offset > size) ? size :
|
|
||||||
sg->length - sg_offset;
|
|
||||||
ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
|
|
||||||
sg_left, op);
|
|
||||||
size -= sg_left;
|
|
||||||
if (size == 0)
|
|
||||||
break;
|
|
||||||
pos += sg->length;
|
|
||||||
}
|
|
||||||
} else if (memdesc->pages != NULL) {
|
|
||||||
addr = vmap(memdesc->pages, memdesc->page_count,
|
|
||||||
VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
|
|
||||||
if (addr == NULL)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
/* Make sure the offset + size do not overflow the address */
|
|
||||||
if (addr + ((size_t) offset + (size_t) size) < addr)
|
|
||||||
return -ERANGE;
|
|
||||||
|
|
||||||
ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
|
|
||||||
vunmap(addr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
||||||
|
uint64_t sg_offset, sg_left;
|
||||||
|
|
||||||
|
if (offset >= (pos + sg->length)) {
|
||||||
|
pos += sg->length;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
sg_offset = offset > pos ? offset - pos : 0;
|
||||||
|
sg_left = (sg->length - sg_offset > size) ? size :
|
||||||
|
sg->length - sg_offset;
|
||||||
|
ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
|
||||||
|
sg_left, op);
|
||||||
|
size -= sg_left;
|
||||||
|
if (size == 0)
|
||||||
|
break;
|
||||||
|
pos += sg->length;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (memdesc->sgt == NULL)
|
||||||
|
kgsl_free_sgt(sgt);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kgsl_cache_range_op);
|
EXPORT_SYMBOL(kgsl_cache_range_op);
|
||||||
|
|
Loading…
Add table
Reference in a new issue