mm: vmscan: support setting of kswapd cpu affinity

Allow the kswapd cpu affinity to be configured.
There can be power benefits on certain targets when limiting kswapd
to run only on certain cores.

CRs-fixed: 752344
Change-Id: I8a83337ff313a7e0324361140398226a09f8be0f
Signed-off-by: Liam Mark <lmark@codeaurora.org>
[imaund@codeaurora.org: Resolved trivial context conflicts.]
Signed-off-by: Ian Maund <imaund@codeaurora.org>
Author: Liam Mark (2014-11-25 09:51:29 -08:00), committed by David Keitel
parent 90863369e5 / commit 8918861861
2 changed files with 41 additions and 2 deletions

mm/Kconfig

@@ -628,6 +628,19 @@ config BALANCE_ANON_FILE_RECLAIM
 	  Swapping anonymous pages out to memory can be efficient enough to justify
 	  treating anonymous and file backed pages equally.
 
+config KSWAPD_CPU_AFFINITY_MASK
+	string "kswapd cpu affinity mask"
+	depends on SMP
+	help
+	  Set the cpu affinity for the kswapd task.
+	  There can be power benefits on certain targets when limiting kswapd
+	  to run only on certain cores.
+	  The cpu affinity bitmask is represented by a hex string where commas
+	  group hex digits into chunks. Each chunk defines exactly 32 bits of
+	  the resultant bitmask.
+	  For example to limit kswapd to the first 4 cores use the following:
+	  CONFIG_KSWAPD_CPU_AFFINITY_MASK="f"
+
 # For architectures that support deferred memory initialisation
 config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	bool
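
The mask string in the help text above uses the familiar comma-chunked hex layout (essentially the same format as files such as /proc/irq/*/smp_affinity): each chunk covers 32 bits, and the rightmost chunk holds the lowest-numbered CPUs. As a rough illustration only -- plain userspace C, not kernel code and not part of this patch -- the sketch below decodes such a string and prints which CPUs it allows; the input "3,00000000" is an assumed example.

/*
 * Illustration only -- NOT kernel code and not part of this patch.
 * Decode a comma-chunked hex mask string ("f", "3,00000000", ...) and
 * print the CPUs it allows.  Each chunk covers 32 bits; the rightmost
 * chunk holds the lowest-numbered CPUs.  Only the first 64 CPUs are
 * handled, to keep the sketch short.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *mask = "3,00000000";	/* assumed example input */
	char buf[128];
	char *chunk;
	uint64_t bits = 0;
	int cpu;

	snprintf(buf, sizeof(buf), "%s", mask);

	/* Left to right: each new chunk shifts the earlier ones up by 32 bits. */
	for (chunk = strtok(buf, ","); chunk; chunk = strtok(NULL, ","))
		bits = (bits << 32) | (uint64_t)strtoul(chunk, NULL, 16);

	printf("\"%s\" allows CPUs:", mask);
	for (cpu = 0; cpu < 64; cpu++)
		if (bits & (1ULL << cpu))
			printf(" %d", cpu);
	printf("\n");
	return 0;
}

For CONFIG_KSWAPD_CPU_AFFINITY_MASK="f" this prints CPUs 0 1 2 3, matching the example in the help text; "3,00000000" would allow CPUs 32 and 33.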

mm/vmscan.c

@@ -147,6 +147,12 @@ int vm_swappiness = 60;
  */
 unsigned long vm_total_pages;
 
+#ifdef CONFIG_KSWAPD_CPU_AFFINITY_MASK
+char *kswapd_cpu_mask = CONFIG_KSWAPD_CPU_AFFINITY_MASK;
+#else
+char *kswapd_cpu_mask = NULL;
+#endif
+
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -3485,7 +3491,7 @@ static int kswapd(void *p)
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
-	if (!cpumask_empty(cpumask))
+	if (kswapd_cpu_mask == NULL && !cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
@@ -3655,6 +3661,22 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
 	return NOTIFY_OK;
 }
 
+static int set_kswapd_cpu_mask(pg_data_t *pgdat)
+{
+	int ret = 0;
+	cpumask_t tmask;
+
+	if (!kswapd_cpu_mask)
+		return 0;
+
+	cpumask_clear(&tmask);
+	ret = cpumask_parse(kswapd_cpu_mask, &tmask);
+	if (ret)
+		return ret;
+
+	return set_cpus_allowed_ptr(pgdat->kswapd, &tmask);
+}
+
 /*
  * This kswapd start function will be called by init and node-hot-add.
  * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
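
In the hunk above, cpumask_parse() converts the configured string into a cpumask_t and set_cpus_allowed_ptr() then restricts the node's kswapd task to those CPUs. One way to confirm the result at runtime is to read the Cpus_allowed_list line in /proc/<pid>/status for the kswapd thread, or to query it with sched_getaffinity() as in the hypothetical userspace helper sketched below (not part of this patch):

/*
 * Hypothetical helper -- not part of this patch.  Print the CPU affinity
 * of an arbitrary task, e.g. kswapd0, given its PID.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	cpu_set_t set;
	pid_t pid;
	int cpu;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	CPU_ZERO(&set);
	if (sched_getaffinity(pid, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}

	printf("pid %d may run on CPUs:", (int)pid);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf(" %d", cpu);
	printf("\n");
	return 0;
}

Run against the PID of a kswapd thread, the output should list only the CPUs permitted by CONFIG_KSWAPD_CPU_AFFINITY_MASK.
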
@@ -3674,6 +3696,9 @@ int kswapd_run(int nid)
 		pr_err("Failed to start kswapd on node %d\n", nid);
 		ret = PTR_ERR(pgdat->kswapd);
 		pgdat->kswapd = NULL;
+	} else if (kswapd_cpu_mask) {
+		if (set_kswapd_cpu_mask(pgdat))
+			pr_warn("error setting kswapd cpu affinity mask\n");
 	}
 	return ret;
 }
@@ -3699,7 +3724,8 @@ static int __init kswapd_init(void)
 	swap_setup();
 	for_each_node_state(nid, N_MEMORY)
 		kswapd_run(nid);
-	hotcpu_notifier(cpu_callback, 0);
+	if (kswapd_cpu_mask == NULL)
+		hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }