sched: EAS/WALT: use cr_avg instead of prev_runnable_sum

WALT accounts for two major statistics: CPU load and cumulative task
demand.

The CPU load, an accumulation of each CPU's absolute execution time,
is used for CPU frequency guidance.  The cumulative task demand, by
contrast, is each CPU's instantaneous load reflecting the CPU's load
at a given time, and is used for task placement decisions.

Use cumulative tasks demand for cpu_util() for task placement and
introduce cpu_util_freq() for frequency guidance.

Change-Id: Id928f01dbc8cb2a617cdadc584c1f658022565c5
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
This commit is contained in:
Joonwoo Park 2016-12-08 16:12:12 -08:00
parent 48f67ea85d
commit ee4cebd75e
3 changed files with 18 additions and 4 deletions

View file

@ -2992,7 +2992,7 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
unsigned long cpu_utilization = cpu_util(cpu);
unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)

View file

@ -4659,7 +4659,7 @@ static inline void hrtick_update(struct rq *rq)
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
#define boosted_cpu_util(cpu) cpu_util(cpu)
#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#ifdef CONFIG_SMP
@ -5937,7 +5937,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
unsigned long util = cpu_util(cpu);
unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);

View file

@ -1592,7 +1592,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_LOAD_SHIFT;
do_div(util, walt_ravg_window);
}
#endif
@ -1608,6 +1608,20 @@ static inline unsigned long cpu_util(int cpu)
return __cpu_util(cpu, 0);
}
/*
 * cpu_util_freq() - CPU utilization estimate used for frequency guidance.
 *
 * Unlike cpu_util() (used for task placement, which this commit switches
 * to cumulative_runnable_avg), this helper deliberately keeps using WALT's
 * prev_runnable_sum — the accumulated execution time over the last WALT
 * window — when WALT is enabled.  Without WALT it falls back to the PELT
 * cfs util_avg signal.  The result is clamped to the CPU's original
 * capacity so callers never see a value above capacity_orig_of(cpu).
 */
static inline unsigned long cpu_util_freq(int cpu)
{
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
/*
 * Scale the window-relative runnable sum into SCHED_LOAD units:
 * util = prev_runnable_sum * SCHED_LOAD_SCALE / walt_ravg_window.
 */
util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
do_div(util, walt_ravg_window);
}
#endif
/* Clamp: utilization can never exceed the CPU's original capacity. */
return (util >= capacity) ? capacity : util;
}
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHED