mm: zcache: shrink zcache on memory pressure
When the number of file pages drops, zcache pages can at some point exceed the number of file pages; and further, when we have to reclaim the maximum possible number of file pages to launch an application, we need a way to reclaim zcache pages as well, at least to the extent that they match the number of file pages. This helps provide better foreground headroom. Change-Id: I481bfb9961ed5cee47ebeae08eb910bb269b644c Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
This commit is contained in:
parent
42bfd5ca64
commit
aefb461740
1 changed files with 70 additions and 0 deletions
70
mm/zcache.c
70
mm/zcache.c
|
@ -68,6 +68,9 @@ static u64 zcache_pool_pages;
|
|||
static u64 zcache_evict_zpages;
|
||||
static u64 zcache_evict_filepages;
|
||||
static u64 zcache_reclaim_fail;
|
||||
static u64 zcache_pool_shrink;
|
||||
static u64 zcache_pool_shrink_fail;
|
||||
static u64 zcache_pool_shrink_pages;
|
||||
static atomic_t zcache_stored_pages = ATOMIC_INIT(0);
|
||||
|
||||
#define GFP_ZCACHE \
|
||||
|
@ -148,6 +151,64 @@ static void zcache_rbnode_cache_destroy(void)
|
|||
kmem_cache_destroy(zcache_rbnode_cache);
|
||||
}
|
||||
|
||||
static unsigned long zcache_count(struct shrinker *s,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
unsigned long active_file;
|
||||
long file_gap;
|
||||
|
||||
active_file = global_page_state(NR_ACTIVE_FILE);
|
||||
file_gap = zcache_pool_pages - active_file;
|
||||
if (file_gap < 0)
|
||||
file_gap = 0;
|
||||
return file_gap;
|
||||
}
|
||||
|
||||
/*
 * Shrinker "scan" callback: evict compressed pages from the zbud pool
 * until the pool no longer exceeds the number of active file pages, so
 * zcache does not crowd out the file page cache under memory pressure.
 * Returns the number of zbud pages freed this invocation.
 */
static unsigned long zcache_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long active_file;
	long file_gap;
	unsigned long freed = 0;
	/*
	 * NOTE(review): this guard is a plain check-then-set on a static
	 * bool — two shrinker invocations on different CPUs can both see
	 * running == false and proceed concurrently.  If the shrinker can
	 * run in parallel, an atomic test-and-set would be safer; confirm
	 * against the shrinker calling context.
	 */
	static bool running;
	struct zcache_pool *zpool = zcache.pools[0];

	if (running)
		goto end;

	running = true;
	active_file = global_page_state(NR_ACTIVE_FILE);

	/*
	 * file_gap is the number of zbud pages zcache holds beyond the
	 * active file page count; file_gap <= 0 means the pool is already
	 * no larger than the active file LRU, so there is nothing to do.
	 */
	file_gap = zcache_pool_pages - active_file;
	if (file_gap < 0)
		file_gap = 0;
	else
		zcache_pool_shrink++;
	/* Evict one zbud page per iteration until the gap is closed. */
	while (file_gap-- > 0) {
		/* 8 = retry budget passed to the zbud eviction routine. */
		if (zbud_reclaim_page(zpool->pool, 8)) {
			/* Eviction failed; stop rather than spin. */
			zcache_pool_shrink_fail++;
			break;
		}
		freed++;
	}

	zcache_pool_shrink_pages += freed;
	/* Re-read the authoritative pool size after evictions. */
	zcache_pool_pages = zbud_get_pool_size(zpool->pool);
	running = false;
end:
	return freed;
}
|
||||
|
||||
/*
 * Shrinker hooking zcache into memory-pressure reclaim: zcache_count()
 * reports the reclaimable excess, zcache_scan() performs the eviction.
 * seeks is set well above DEFAULT_SEEKS, marking zcache objects as
 * expensive to recreate — presumably to make the VM shrink zcache less
 * aggressively than ordinary caches; confirm intent with the author.
 */
static struct shrinker zcache_shrinker = {
	.scan_objects = zcache_scan,
	.count_objects = zcache_count,
	.seeks = DEFAULT_SEEKS * 16
};
|
||||
|
||||
/*
|
||||
* Compression functions
|
||||
* (Below functions are copied from zswap!)
|
||||
|
@ -875,6 +936,14 @@ static int __init zcache_debugfs_init(void)
|
|||
&zcache_evict_filepages);
|
||||
debugfs_create_u64("reclaim_fail", S_IRUGO, zcache_debugfs_root,
|
||||
&zcache_reclaim_fail);
|
||||
debugfs_create_u64("inactive_pages_refused", S_IRUGO,
|
||||
zcache_debugfs_root, &zcache_inactive_pages_refused);
|
||||
debugfs_create_u64("pool_shrink_count", S_IRUGO,
|
||||
zcache_debugfs_root, &zcache_pool_shrink);
|
||||
debugfs_create_u64("pool_shrink_fail", S_IRUGO,
|
||||
zcache_debugfs_root, &zcache_pool_shrink_fail);
|
||||
debugfs_create_u64("pool_shrink_pages", S_IRUGO,
|
||||
zcache_debugfs_root, &zcache_pool_shrink_pages);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -920,6 +989,7 @@ static int __init init_zcache(void)
|
|||
|
||||
if (zcache_debugfs_init())
|
||||
pr_warn("debugfs initialization failed\n");
|
||||
register_shrinker(&zcache_shrinker);
|
||||
return 0;
|
||||
pcpufail:
|
||||
zcache_comp_exit();
|
||||
|
|
Loading…
Add table
Reference in a new issue