ion: fix the return value of shrinkers

The ION page pool and CMA shrinkers return the total pooled
memory instead of the amount actually freed. This can result
in an early exit of the ION page pool shrinker, giving back
less memory to the system than requested. For the CMA shrinker
this change has no effect, since the callers of shrink_slab
currently ignore the return value.

Change-Id: I0b3727807d9a5f020623766d27da360d077395f3
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
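
For context, the shrinker contract the patch restores can be sketched roughly as
below (a minimal illustration, not code from this patch; example_pool_size and
example_pool_drain are made-up placeholders): count_objects reports how much
could be reclaimed, while scan_objects must return only what it actually freed,
so the caller can judge progress.

#include <linux/shrinker.h>
#include <linux/atomic.h>

/* Hypothetical pool size counter and drain helper; not part of the patch. */
static atomic_t example_pool_size;
static unsigned long example_pool_drain(unsigned long nr);

static unsigned long example_shrink_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	/* count_objects: report how much *could* be reclaimed. */
	return atomic_read(&example_pool_size);
}

static unsigned long example_shrink_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	/*
	 * scan_objects: return only what was actually freed on this call,
	 * so the caller can account progress against sc->nr_to_scan.
	 */
	return example_pool_drain(sc->nr_to_scan);
}

static struct shrinker example_shrinker = {
	.count_objects = example_shrink_count,
	.scan_objects  = example_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};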
Author: Vinayak Menon
Date:   2017-09-19 15:04:09 +05:30
Parent: d727a95d2e
Commit: d385ca7d06

2 changed files with 8 additions and 5 deletions

@@ -3,7 +3,7 @@
  *
  * Copyright (C) Linaro 2012
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -344,7 +344,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
 }
 
-void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 {
 	struct list_head *entry, *_n;
 	unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +369,7 @@ void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
 	}
 
 	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+	return drained_size;
 }
 
 int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +387,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
 static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 			struct shrink_control *sc)
 {
+	unsigned long freed;
 	struct ion_cma_secure_heap *sheap = container_of(shrinker,
 			struct ion_cma_secure_heap, shrinker);
 	int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +400,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
 	if (!mutex_trylock(&sheap->chunk_lock))
 		return -1;
 
-	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+	freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
 	mutex_unlock(&sheap->chunk_lock);
 
-	return atomic_read(&sheap->total_pool_size);
+	return freed;
 }
 
 static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,

@@ -183,7 +183,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 		freed += (1 << pool->order);
 	}
 
-	return ion_page_pool_total(pool, high);
+	return freed;
 }
 
 struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
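
To see why returning the pooled total matters for the page pool path, here is a
rough sketch of an accumulating caller, modelled loosely on the ION heap shrink
loop; example_heap_shrink and its loop structure are assumptions for
illustration, not code from this patch:

/* Hypothetical caller that drains several pools until nr_to_scan is met. */
static int example_heap_shrink(struct ion_page_pool **pools, int num_pools,
			       gfp_t gfp_mask, int nr_to_scan)
{
	int nr_total = 0;
	int i;

	for (i = 0; i < num_pools; i++) {
		/*
		 * If this returned the pool's total size instead of the pages
		 * actually freed, nr_to_scan would be consumed too quickly and
		 * the loop would exit before the later pools are drained.
		 */
		int nr_freed = ion_page_pool_shrink(pools[i], gfp_mask,
						    nr_to_scan);

		nr_total += nr_freed;
		nr_to_scan -= nr_freed;
		if (nr_to_scan <= 0)
			break;
	}

	return nr_total;
}

With the fix, each call reports only the pages it released, so a loop like this
keeps draining pools until the requested scan count is genuinely satisfied.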