sched/fair: Add a backup_cpu to find_best_target

Sometimes we find a target cpu but then do not use it, because
energy_diff() indicates that we would either increase energy
usage or save nothing. To offer an additional option for those
cases, also return a backup cpu: the one we would have selected
had the target cpu not been found. This gives us another chance
to save some energy (a simplified sketch of the resulting
selection flow follows the commit metadata below).

Change-Id: I42c4f20aba10e4cf65b51ac4153e2e00e534c8c7
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
Author: Chris Redpath <chris.redpath@arm.com>
Date:   2017-07-04 10:19:58 +01:00
Parent: 6cb8fcccb2
Commit: 2e13f308a9

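In outline, the selection flow this patch produces looks like the
sketch below. It is a simplified, standalone model, not the kernel
code: find_best_target() and energy_diff() here are stand-in stubs,
and the CPU numbers are made up purely to exercise the fallback path.

#include <stdio.h>

/* Stand-in for the kernel's find_best_target(): returns the best
 * candidate and reports the runner-up through *backup_cpu. */
static int find_best_target(int *backup_cpu)
{
	*backup_cpu = 2;	/* runner-up CPU */
	return 3;		/* best candidate CPU */
}

/* Stand-in for energy_diff(): >= 0 means "no energy saving". */
static int energy_diff(int dst_cpu)
{
	return dst_cpu == 3 ? 1 : -1;	/* CPU 3 costs, CPU 2 saves */
}

int main(void)
{
	int prev_cpu = 0, tmp_backup;
	int target_cpu = find_best_target(&tmp_backup);

	if (energy_diff(target_cpu) >= 0) {
		/* No saving on the target: try the backup before
		 * giving up and staying on prev_cpu. */
		target_cpu = tmp_backup;
		if (tmp_backup < 0 || energy_diff(target_cpu) >= 0)
			target_cpu = prev_cpu;
	}

	printf("selected CPU %d\n", target_cpu);	/* prints 2 */
	return 0;
}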

@@ -6238,7 +6238,8 @@ static int start_cpu(bool boosted)
 	return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
 }
 
-static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
+static inline int find_best_target(struct task_struct *p, int *backup_cpu,
+				   bool boosted, bool prefer_idle)
 {
 	unsigned long best_idle_min_cap_orig = ULONG_MAX;
 	unsigned long min_util = boosted_task_util(p);
@@ -6255,6 +6256,8 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
 	int target_cpu = -1;
 	int cpu, i;
 
+	*backup_cpu = -1;
+
 	schedstat_inc(p, se.statistics.nr_wakeups_fbt_attempts);
 	schedstat_inc(this_rq(), eas_stats.fbt_attempts);
@@ -6489,6 +6492,10 @@ static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
 		target_cpu = prefer_idle
 			? best_active_cpu
 			: best_idle_cpu;
+	else
+		*backup_cpu = prefer_idle
+			? best_active_cpu
+			: best_idle_cpu;
 
 	trace_sched_find_best_target(p, prefer_idle, min_util, cpu,
 				     best_idle_cpu, best_active_cpu,
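The condition guarding this if/else sits just above the hunk and is
elided from the diff context. Its effect is that exactly one of
target_cpu and *backup_cpu receives the prefer_idle-dependent pick.
A toy standalone illustration of that either/or behaviour, where
'have_target' is a made-up stand-in for the elided condition:

#include <assert.h>
#include <stdbool.h>

/* Toy model: exactly one of *target_cpu / *backup_cpu receives the
 * prefer_idle-dependent pick; the other keeps its old value. */
static void pick(bool have_target, bool prefer_idle,
		 int best_active_cpu, int best_idle_cpu,
		 int *target_cpu, int *backup_cpu)
{
	if (!have_target)
		*target_cpu = prefer_idle ? best_active_cpu : best_idle_cpu;
	else
		*backup_cpu = prefer_idle ? best_active_cpu : best_idle_cpu;
}

int main(void)
{
	int target_cpu = 4, backup_cpu = -1;

	/* A target (CPU 4) already exists, so the would-be pick
	 * (prefer_idle => best_active_cpu == 1) becomes the backup. */
	pick(true, true, 1, 2, &target_cpu, &backup_cpu);
	assert(target_cpu == 4 && backup_cpu == 1);
	return 0;
}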
@@ -6527,7 +6534,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
 	struct sched_domain *sd;
-	int target_cpu = prev_cpu, tmp_target;
+	int target_cpu = prev_cpu, tmp_target, tmp_backup;
 	bool boosted, prefer_idle;
 
 	schedstat_inc(p, se.statistics.nr_wakeups_secb_attempts);
@@ -6556,7 +6563,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
 
 	/* Find a cpu with sufficient capacity */
-	tmp_target = find_best_target(p, boosted, prefer_idle);
+	tmp_target = find_best_target(p, &tmp_backup, boosted, prefer_idle);
 
 	if (!sd)
 		goto unlock;
@@ -6585,10 +6592,15 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 	}
 
 	if (energy_diff(&eenv) >= 0) {
-		schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
-		schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
-		target_cpu = prev_cpu;
-		goto unlock;
+		/* No energy saving for target_cpu, try backup */
+		target_cpu = tmp_backup;
+		eenv.dst_cpu = target_cpu;
+		if (tmp_backup < 0 || energy_diff(&eenv) >= 0) {
+			schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
+			schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
+			target_cpu = prev_cpu;
+			goto unlock;
+		}
 	}
 
 	schedstat_inc(p, se.statistics.nr_wakeups_secb_nrg_sav);
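One subtlety in this last hunk: eenv.dst_cpu is updated before the
validity check, so the tmp_backup < 0 test relies on C's short-circuit
evaluation to keep energy_diff() from ever being asked about an
invalid destination. A standalone sketch of that ordering, using a
stub energy_diff() that counts its calls:

#include <assert.h>

struct energy_env {
	int dst_cpu;
};

static int calls;

/* Stub energy_diff(): always reports "no saving" and counts calls
 * so the short-circuit is visible. */
static int energy_diff(struct energy_env *eenv)
{
	(void)eenv;
	calls++;
	return 0;
}

int main(void)
{
	struct energy_env eenv = { .dst_cpu = 5 };
	int prev_cpu = 0, tmp_backup = -1;	/* no backup was found */
	int target_cpu = tmp_backup;

	eenv.dst_cpu = target_cpu;
	/* tmp_backup < 0 is true, so energy_diff() is never called
	 * with dst_cpu == -1. */
	if (tmp_backup < 0 || energy_diff(&eenv) >= 0)
		target_cpu = prev_cpu;

	assert(calls == 0 && target_cpu == prev_cpu);
	return 0;
}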