sched: use wakeup latency as c-state determinant

At present, the C-state aware scheduler uses a raw c-state index number
as its determinant and avoids task placement on deeper c-state CPUs at
the cost of latency.  However, there are CPUs that offer comparable
wake-up latency at different c-state levels, and the wake-up latency at
each c-state level is already being fed to the scheduler.

Hence, use the wakeup_latency as the c-state determinant instead of the
raw c-state index, to avoid unnecessary task packing where possible.

CRs-fixed: 1074879
Change-Id: If927f84f6c8ba719716d99669e5d1f1b19aaacbe
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
This commit is contained in:
Joonwoo Park 2016-09-23 12:55:54 -07:00
parent 773f15cdab
commit 15d2c97d2a

View file

@ -2619,7 +2619,7 @@ struct cluster_cpu_stats {
int best_idle_cpu, least_loaded_cpu;
int best_capacity_cpu, best_cpu, best_sibling_cpu;
int min_cost, best_sibling_cpu_cost;
int best_cpu_cstate;
int best_cpu_wakeup_latency;
u64 min_load, best_load, best_sibling_cpu_load;
s64 highest_spare_capacity;
};
@ -2827,19 +2827,19 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
struct cpu_select_env *env, int cpu_cost)
{
int cpu_cstate;
int wakeup_latency;
int prev_cpu = env->prev_cpu;
cpu_cstate = cpu_rq(cpu)->cstate;
wakeup_latency = cpu_rq(cpu)->wakeup_latency;
if (env->need_idle) {
stats->min_cost = cpu_cost;
if (idle_cpu(cpu)) {
if (cpu_cstate < stats->best_cpu_cstate ||
(cpu_cstate == stats->best_cpu_cstate &&
cpu == prev_cpu)) {
if (wakeup_latency < stats->best_cpu_wakeup_latency ||
(wakeup_latency == stats->best_cpu_wakeup_latency &&
cpu == prev_cpu)) {
stats->best_idle_cpu = cpu;
stats->best_cpu_cstate = cpu_cstate;
stats->best_cpu_wakeup_latency = wakeup_latency;
}
} else {
if (env->cpu_load < stats->min_load ||
@ -2855,7 +2855,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
if (cpu_cost < stats->min_cost) {
stats->min_cost = cpu_cost;
stats->best_cpu_cstate = cpu_cstate;
stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CPU_COST;
@ -2864,11 +2864,11 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
/* CPU cost is the same. Start breaking the tie by C-state */
if (cpu_cstate > stats->best_cpu_cstate)
if (wakeup_latency > stats->best_cpu_wakeup_latency)
return;
if (cpu_cstate < stats->best_cpu_cstate) {
stats->best_cpu_cstate = cpu_cstate;
if (wakeup_latency < stats->best_cpu_wakeup_latency) {
stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
@ -2883,8 +2883,8 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
}
if (stats->best_cpu != prev_cpu &&
((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
(cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
(wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
@ -2979,7 +2979,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
stats->highest_spare_capacity = 0;
stats->least_loaded_cpu = -1;
stats->best_cpu_cstate = INT_MAX;
stats->best_cpu_wakeup_latency = INT_MAX;
/* No need to initialize stats->best_load */
}