From 40b830cd5e450ddf1e96bc1eb6061c1c8880ae16 Mon Sep 17 00:00:00 2001
From: Deepak Kumar
Date: Tue, 20 Jun 2017 16:59:51 +0530
Subject: [PATCH] msm: kgsl: Do not memset pages to zero while adding to pool

Zeroing a page while adding it to the pool is inefficient, because a
pooled page can be returned to the system if the shrinker kicks in; in
that case the time spent zeroing it is wasted. Instead of zeroing the
page when it is added to the pool, zero it when it is taken from the
pool. This reduces the time taken to free a big chunk of memory.
Allocation time is not affected, since a page allocated from the
system is zeroed during allocation anyway.

Change-Id: I41ab2cb88fb4fd9854d2cc9a45bb60fc7013286a
Signed-off-by: Deepak Kumar
---
 drivers/gpu/msm/kgsl_pool.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index bb92b8b79d93..6c26e420f5cd 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order)
 
 /* Map the page into kernel and zero it out */
 static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
+_kgsl_pool_zero_page(struct page *p)
 {
-	int i;
+	void *addr = kmap_atomic(p);
 
-	for (i = 0; i < (1 << pool_order); i++) {
-		struct page *page = nth_page(p, i);
-		void *addr = kmap_atomic(page);
-
-		memset(addr, 0, PAGE_SIZE);
-		dmac_flush_range(addr, addr + PAGE_SIZE);
-		kunmap_atomic(addr);
-	}
+	memset(addr, 0, PAGE_SIZE);
+	dmac_flush_range(addr, addr + PAGE_SIZE);
+	kunmap_atomic(addr);
 }
 
 /* Add a page to specified pool */
 static void
 _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
 {
-	_kgsl_pool_zero_page(p, pool->pool_order);
-
 	spin_lock(&pool->list_lock);
 	list_add_tail(&p->lru, &pool->page_list);
 	pool->page_count++;
@@ -318,7 +311,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
 		} else
 			return -ENOMEM;
 	}
-	_kgsl_pool_zero_page(page, order);
 	goto done;
 }
 
@@ -338,7 +330,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
 		page = alloc_pages(gfp_mask, order);
 		if (page == NULL)
 			return -ENOMEM;
-		_kgsl_pool_zero_page(page, order);
 		goto done;
 	}
 }
@@ -368,13 +359,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
 		} else
 			return -ENOMEM;
 	}
-
-	_kgsl_pool_zero_page(page, order);
 }
 
 done:
	for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
 		p = nth_page(page, j);
+		_kgsl_pool_zero_page(p);
 		pages[pcount] = p;
 		pcount++;
 	}
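
Below is a minimal userspace sketch of the zero-on-take pattern this patch
adopts: pages go into the pool dirty and are cleared only when handed out.
It is an illustration only, not kernel code; struct page_pool, pool_add_page,
and pool_get_page are hypothetical names, and calloc() stands in for the
zeroed allocation the system page allocator performs on a cache miss.

/* Sketch of zero-on-take pooling; all names are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define POOL_MAX  8

struct page_pool {
	void *pages[POOL_MAX];
	int count;
};

/*
 * Add a page to the pool without zeroing it. If the pool later returns
 * the page to the system (e.g. under shrinker pressure), no time was
 * wasted clearing it.
 */
static int pool_add_page(struct page_pool *pool, void *page)
{
	if (pool->count == POOL_MAX)
		return -1;	/* pool full; caller frees the page */
	pool->pages[pool->count++] = page;
	return 0;
}

/*
 * Take a page from the pool and zero it on the way out, so callers
 * always see cleared memory regardless of where the page came from.
 */
static void *pool_get_page(struct page_pool *pool)
{
	void *page;

	if (pool->count == 0) {
		/* Fresh allocation is zeroed anyway (calloc here). */
		return calloc(1, PAGE_SIZE);
	}
	page = pool->pages[--pool->count];
	memset(page, 0, PAGE_SIZE);
	return page;
}

int main(void)
{
	struct page_pool pool = { .count = 0 };
	char *p = pool_get_page(&pool);

	memset(p, 0xAB, PAGE_SIZE);	/* dirty the page */
	pool_add_page(&pool, p);	/* recycled without clearing */

	p = pool_get_page(&pool);	/* zeroed only on reuse */
	printf("first byte after reuse: %d\n", p[0]);
	free(p);
	return 0;
}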