sched: EAS/WALT: take into account of waking task's load

WALT's function cpu_util(cpu) reports a CPU's load without taking into
account the waking task's load.  Thus cpu_overutilized() currently
underestimates the load on the waking task's previous CPU.

Take the task's load into account when determining whether the previous
CPU is overutilized, so we can bail out early without running the
expensive energy_diff().

Change-Id: I30f146984a880ad2cc1b8a4ce35bd239a8c9a607
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
(minor rebase conflicts)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
This commit is contained in:
Joonwoo Park 2017-01-25 17:45:56 -08:00
parent f94958ffa7
commit 94e5c96507

View file

@@ -4656,6 +4656,7 @@ static inline void hrtick_update(struct rq *rq)
 #endif
 
 #ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
 static bool cpu_overutilized(int cpu);
 unsigned long boosted_cpu_util(int cpu);
 #else
@@ -5856,9 +5857,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, 0);
 }
 
+static bool __cpu_overutilized(int cpu, int delta)
+{
+	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
 static bool cpu_overutilized(int cpu)
 {
-	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+	return __cpu_overutilized(cpu, 0);
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -6577,6 +6583,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	}
 
 	if (target_cpu != prev_cpu) {
+		int delta = 0;
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
 			.src_cpu	= prev_cpu,
@@ -6584,8 +6591,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 			.task		= p,
 		};
 
+#ifdef CONFIG_SCHED_WALT
+		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+			delta = task_util(p);
+#endif
 		/* Not enough spare capacity on previous cpu */
-		if (__cpu_overutilized(prev_cpu, delta)) {
 			schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
 			schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
 			goto unlock;