BACKPORT: sched/fair: Move select_task_rq_fair slow-path into its own function

In preparation for changes that would otherwise require adding a new
level of indentation to the while(sd) loop, create a new function
find_idlest_cpu() which contains this loop, and rename the existing
find_idlest_cpu() to find_idlest_group_cpu().

Code inside the while(sd) loop is unchanged. @new_cpu is added as a
variable in the new function, with the same initial value as the
@new_cpu in select_task_rq_fair().
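
The net effect on the caller is that the slow path of
select_task_rq_fair() collapses to a single call to the new
function (taken verbatim from the hunk below):

    new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);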

Change-Id: I9842308cab00dc9cd6c513fc38c609089a1aaaaf
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171005114516.18617-2-brendan.jackman@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(reworked for eas/cas schedstats added in Android)
(cherry-picked commit 18bd1b4bd53a from tip:sched/core)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6072,10 +6072,10 @@ skip_spare:
 }
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
@@ -6122,6 +6122,65 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	}
 
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
+
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = prev_cpu;
+	int wu = sd_flag & SD_BALANCE_WAKE;
+	int cas_cpu = -1;
+
+	if (wu) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
+		schedstat_inc(this_rq(), eas_stats.cas_attempts);
+	}
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (wu)
+			schedstat_inc(sd, eas_stats.cas_attempts);
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = cas_cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	if (wu && (cas_cpu >= 0)) {
+		schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
+		schedstat_inc(this_rq(), eas_stats.cas_count);
+	}
+
+	return new_cpu;
+}
 
 /*
@@ -6698,56 +6757,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
 	} else {
-		int wu = sd_flag & SD_BALANCE_WAKE;
-		int cas_cpu = -1;
-
-		if (wu) {
-			schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
-			schedstat_inc(this_rq(), eas_stats.cas_attempts);
-		}
-
-		while (sd) {
-			struct sched_group *group;
-			int weight;
-
-			if (wu)
-				schedstat_inc(sd, eas_stats.cas_attempts);
-
-			if (!(sd->flags & sd_flag)) {
-				sd = sd->child;
-				continue;
-			}
-
-			group = find_idlest_group(sd, p, cpu, sd_flag);
-			if (!group) {
-				sd = sd->child;
-				continue;
-			}
-
-			new_cpu = find_idlest_cpu(group, p, cpu);
-			if (new_cpu == -1 || new_cpu == cpu) {
-				/* Now try balancing at a lower domain level of cpu */
-				sd = sd->child;
-				continue;
-			}
-
-			/* Now try balancing at a lower domain level of new_cpu */
-			cpu = cas_cpu = new_cpu;
-			weight = sd->span_weight;
-			sd = NULL;
-			for_each_domain(cpu, tmp) {
-				if (weight <= tmp->span_weight)
-					break;
-				if (tmp->flags & sd_flag)
-					sd = tmp;
-			}
-			/* while loop will break here if sd == NULL */
-		}
-
-		if (wu && (cas_cpu >= 0)) {
-			schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
-			schedstat_inc(this_rq(), eas_stats.cas_count);
-		}
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}
 
 	rcu_read_unlock();