sched: add option whether CPU C-state is used to guide task placement

Some CPUs have no appreciable exit latency penalty for their low power
modes, so C-state-aware placement is not always a win.  Add a new Kconfig
option, CONFIG_SCHED_HMP_CSTATE_AWARE, which controls whether CPU C-state
is used to guide task placement.

CRs-fixed: 1006303
Change-Id: Ie8dbab8e173c3a1842d922f4d1fbd8cc4221789c
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
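
The idea, sketched as a standalone userspace program (illustrative types,
fields, and numbers, not the kernel implementation): among the candidate
CPUs with the lowest power cost, prefer the one sitting in the shallowest,
i.e. fastest to wake, C-state.

/*
 * Illustrative userspace sketch only -- not the kernel code.
 * Policy: pick the lowest power cost; break cost ties in favour
 * of the shallowest C-state (cheapest to wake up).
 */
#include <stdio.h>

struct cpu_candidate {
        int id;
        int power_cost;  /* cost of placing the task here; lower is better */
        int cstate;      /* 0 = active, larger = deeper idle state */
};

/* Return the id of the best candidate; n must be >= 1. */
static int pick_cpu(const struct cpu_candidate *c, int n)
{
        int best = 0;

        for (int i = 1; i < n; i++) {
                if (c[i].power_cost < c[best].power_cost ||
                    (c[i].power_cost == c[best].power_cost &&
                     c[i].cstate < c[best].cstate))
                        best = i;
        }
        return c[best].id;
}

int main(void)
{
        struct cpu_candidate cpus[] = {
                { .id = 0, .power_cost = 20, .cstate = 2 },
                { .id = 1, .power_cost = 20, .cstate = 0 },  /* same cost, shallower */
                { .id = 2, .power_cost = 35, .cstate = 0 },  /* awake, but costlier to run */
        };

        /* Prints "selected cpu1": cheapest tier, shallowest C-state within it. */
        printf("selected cpu%d\n", pick_cpu(cpus, 3));
        return 0;
}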
@@ -1162,6 +1162,14 @@ config SCHED_HMP
 	  in their instructions per-cycle capability or the maximum
 	  frequency they can attain.
 
+config SCHED_HMP_CSTATE_AWARE
+	bool "CPU C-state aware scheduler"
+	depends on SCHED_HMP
+	help
+	  This feature lets the HMP scheduler optimize task placement using
+	  the CPUs' C-states.  If this is enabled, the scheduler places tasks
+	  onto the shallowest C-state CPU among the most power-efficient CPUs.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
 	select PROC_CHILDREN

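As the "depends on" line above implies, the new option only takes effect on
top of SCHED_HMP, so both symbols must be set for the C-state-aware path to
be compiled in.  A hypothetical defconfig fragment (not part of this commit)
enabling it:

CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
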
@@ -3376,28 +3376,13 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
 	return next;
 }
 
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-				 struct cpu_select_env *env)
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
 {
-	int cpu_cost, cpu_cstate;
+	int cpu_cstate;
 	int prev_cpu = env->prev_cpu;
 
-	cpu_cost = power_cost(cpu, task_load(env->p) +
-		   cpu_cravg_sync(cpu, env->sync));
-
-	if (cpu_cost > stats->min_cost)
-		return;
-
-	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
-		if (stats->best_sibling_cpu_cost > cpu_cost ||
-		    (stats->best_sibling_cpu_cost == cpu_cost &&
-		     stats->best_sibling_cpu_load > env->cpu_load)) {
-			stats->best_sibling_cpu_cost = cpu_cost;
-			stats->best_sibling_cpu_load = env->cpu_load;
-			stats->best_sibling_cpu = cpu;
-		}
-	}
-
 	cpu_cstate = cpu_rq(cpu)->cstate;
 
 	if (env->need_idle) {
@@ -3452,6 +3437,49 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 		stats->best_cpu = cpu;
 	}
 }
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int prev_cpu = env->prev_cpu;
+
+	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+		if (stats->best_sibling_cpu_cost > cpu_cost ||
+		    (stats->best_sibling_cpu_cost == cpu_cost &&
+		     stats->best_sibling_cpu_load > env->cpu_load)) {
+			stats->best_sibling_cpu_cost = cpu_cost;
+			stats->best_sibling_cpu_load = env->cpu_load;
+			stats->best_sibling_cpu = cpu;
+		}
+	}
+
+	if ((cpu_cost < stats->min_cost) ||
+	    ((stats->best_cpu != prev_cpu &&
+	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+		if (env->need_idle) {
+			if (idle_cpu(cpu)) {
+				stats->min_cost = cpu_cost;
+				stats->best_idle_cpu = cpu;
+			}
+		} else {
+			stats->min_cost = cpu_cost;
+			stats->min_load = env->cpu_load;
+			stats->best_cpu = cpu;
+		}
+	}
+}
+#endif
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				 struct cpu_select_env *env)
+{
+	int cpu_cost;
+
+	cpu_cost = power_cost(cpu, task_load(env->p) +
+		   cpu_cravg_sync(cpu, env->sync));
+	if (cpu_cost <= stats->min_cost)
+		__update_cluster_stats(cpu, stats, env, cpu_cost);
+}
+
 static void find_best_cpu_in_cluster(struct sched_cluster *c,
 	struct cpu_select_env *env, struct cluster_cpu_stats *stats)
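
Note that the cost filter keeps its behaviour across the refactor: the old
code computed power_cost() and returned early when cpu_cost > stats->min_cost,
while the new update_cluster_stats() wrapper performs the same computation and
calls the config-specific __update_cluster_stats() only when
cpu_cost <= stats->min_cost.  A standalone sketch of that dispatch pattern
follows; every name and number in it is made up for illustration, and it
builds with or without -DCSTATE_AWARE.

/*
 * Standalone sketch of the dispatch pattern above, not the kernel code:
 * the shared cost computation and filter live in one wrapper, and the
 * config-specific policy lives behind the #ifdef.
 */
#include <stdio.h>

static int cstate[4] = { 2, 0, 1, 3 };  /* per-CPU idle depth (illustrative) */
static int min_cost = 100;              /* best cost seen so far */
static int best_cpu = -1;

static int power_cost(int cpu)
{
        return cpu < 2 ? 10 : 30;       /* stand-in for the real cost model */
}

#ifdef CSTATE_AWARE
static void __update(int cpu, int cost)
{
        /* Break cost ties in favour of the shallower C-state. */
        if (best_cpu >= 0 && cost == min_cost && cstate[cpu] >= cstate[best_cpu])
                return;
        min_cost = cost;
        best_cpu = cpu;
}
#else
static void __update(int cpu, int cost)
{
        /* Baseline: any candidate that passed the shared filter wins. */
        min_cost = cost;
        best_cpu = cpu;
}
#endif

static void update(int cpu)
{
        int cost = power_cost(cpu);     /* shared, config-independent work */

        if (cost <= min_cost)           /* shared filter, as in the wrapper */
                __update(cpu, cost);
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                update(cpu);
        printf("best cpu%d (cost %d, cstate %d)\n",
               best_cpu, min_cost, cstate[best_cpu]);
        return 0;
}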