From c34b0b85aa3cc5ed434420b2636131f3ff614b7a Mon Sep 17 00:00:00 2001
From: Syed Rameez Mustafa
Date: Tue, 22 Mar 2016 20:01:56 -0700
Subject: [PATCH] sched: Optimize wakeup placement logic when need_idle is set

Try and find the min cstate CPU within the little cluster when a task
fits there. If there is no idle CPU return the least busy CPU. Also
add a prev CPU bias when C-states or load is the same.

CRs-fixed: 1006303
Change-Id: I577cc70a59f2b0c5309c87b54e106211f96e04a0
Signed-off-by: Syed Rameez Mustafa
---
 kernel/sched/fair.c | 50 ++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 673a39503a2d..11dc798e071a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3134,8 +3134,10 @@ struct cpu_select_env {
 };
 
 struct cluster_cpu_stats {
-	int best_idle_cpu, best_capacity_cpu, best_cpu, best_sibling_cpu;
+	int best_idle_cpu, least_loaded_cpu;
+	int best_capacity_cpu, best_cpu, best_sibling_cpu;
 	int min_cost, best_sibling_cpu_cost;
+	int best_cpu_cstate;
 	u64 min_load, best_sibling_cpu_load;
 	s64 highest_spare_capacity;
 };
@@ -3377,7 +3379,7 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
 static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 				 struct cpu_select_env *env)
 {
-	int cpu_cost;
+	int cpu_cost, cpu_cstate;
 	int prev_cpu = env->prev_cpu;
 
 	cpu_cost = power_cost(cpu, task_load(env->p) +
@@ -3396,19 +3398,34 @@ static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
 		}
 	}
 
+	if (env->need_idle) {
+		stats->min_cost = cpu_cost;
+		if (idle_cpu(cpu)) {
+			cpu_cstate = cpu_rq(cpu)->cstate;
+			if (cpu_cstate < stats->best_cpu_cstate ||
+			    (cpu_cstate == stats->best_cpu_cstate &&
+			     cpu == prev_cpu)) {
+				stats->best_idle_cpu = cpu;
+				stats->best_cpu_cstate = cpu_cstate;
+			}
+		} else {
+			if (env->cpu_load < stats->min_load ||
+			    (env->cpu_load == stats->min_load &&
+			     cpu == prev_cpu)) {
+				stats->least_loaded_cpu = cpu;
+				stats->min_load = env->cpu_load;
+			}
+		}
+
+		return;
+	}
+
 	if ((cpu_cost < stats->min_cost) ||
 	    ((stats->best_cpu != prev_cpu &&
 	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
-		if (env->need_idle) {
-			if (idle_cpu(cpu)) {
-				stats->min_cost = cpu_cost;
-				stats->best_idle_cpu = cpu;
-			}
-		} else {
-			stats->min_cost = cpu_cost;
-			stats->min_load = env->cpu_load;
-			stats->best_cpu = cpu;
-		}
+		stats->min_cost = cpu_cost;
+		stats->min_load = env->cpu_load;
+		stats->best_cpu = cpu;
 	}
 }
 
@@ -3452,6 +3469,8 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
 	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
 	stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
 	stats->highest_spare_capacity = 0;
+	stats->least_loaded_cpu = -1;
+	stats->best_cpu_cstate = INT_MAX;
 }
 
 /*
@@ -3604,8 +3623,11 @@ retry:
 
 	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
 
-	if (stats.best_idle_cpu >= 0) {
-		target = stats.best_idle_cpu;
+	if (env.need_idle) {
+		if (stats.best_idle_cpu >= 0)
+			target = stats.best_idle_cpu;
+		else if (stats.least_loaded_cpu >= 0)
+			target = stats.least_loaded_cpu;
 	} else if (stats.best_cpu >= 0) {
 		if (stats.best_cpu != task_cpu(p) &&
 		    stats.min_cost == stats.best_sibling_cpu_cost)