sched: Provide tunable to switch between PELT and window-based stats

Provide a runtime tunable to switch between using PELT-based load
stats and window-based load stats. This will be needed for runtime
analysis of the two load tracking schemes.

Change-Id: I018f6a90b49844bf2c4e5666912621d87acc7217
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Authored by Srivatsa Vaddagiri on 2014-04-02 19:18:38 -07:00; committed by David Keitel
parent bf863e333f
commit fb9ab2a720
3 changed files with 28 additions and 7 deletions

@@ -1752,10 +1752,10 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
 __read_mostly unsigned int sched_ravg_window = 10000000;
 
 /* Min window size (in ns) = 10ms */
-__read_mostly unsigned int min_sched_ravg_window = 10000000;
+#define MIN_SCHED_RAVG_WINDOW 10000000
 
 /* Max window size (in ns) = 1s */
-__read_mostly unsigned int max_sched_ravg_window = 1000000000;
+#define MAX_SCHED_RAVG_WINDOW 1000000000
 
 #define WINDOW_STATS_USE_RECENT 0
 #define WINDOW_STATS_USE_MAX 1
@@ -1764,6 +1764,9 @@ __read_mostly unsigned int max_sched_ravg_window = 1000000000;
 __read_mostly unsigned int sysctl_sched_window_stats_policy =
 	WINDOW_STATS_USE_AVG;
 
+/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
+unsigned int __read_mostly sched_use_pelt = 1;
+
 unsigned int max_possible_efficiency = 1024;
 unsigned int min_possible_efficiency = 1024;
@@ -1832,6 +1835,9 @@ static int __init set_sched_ravg_window(char *str)
 {
 	get_option(&str, &sched_ravg_window);
 
+	sched_use_pelt = (sched_ravg_window < MIN_SCHED_RAVG_WINDOW ||
+				sched_ravg_window > MAX_SCHED_RAVG_WINDOW);
+
 	return 0;
 }
@@ -1843,7 +1849,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 	int new_window;
 	u64 wallclock = sched_clock();
 
-	if (is_idle_task(p) || (sched_ravg_window < min_sched_ravg_window))
+	if (is_idle_task(p) || sched_use_pelt)
 		return;
 
 	do {

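The mode selection happens once, at boot: set_sched_ravg_window() parses the window size passed on the kernel command line, and any value outside [MIN_SCHED_RAVG_WINDOW, MAX_SCHED_RAVG_WINDOW] (for example 0) results in sched_use_pelt = 1, after which update_task_ravg() bails out early and no window statistics are maintained. Below is a minimal user-space sketch of that selection logic, assuming nothing beyond what the diff shows; main(), set_window() and the printf() output are illustrative only.

#include <stdio.h>

#define MIN_SCHED_RAVG_WINDOW 10000000		/* 10 ms, in ns */
#define MAX_SCHED_RAVG_WINDOW 1000000000	/* 1 s, in ns */

static unsigned int sched_ravg_window = 10000000;
static unsigned int sched_use_pelt = 1;

/* Mirrors the check in set_sched_ravg_window(): out-of-range windows keep PELT. */
static void set_window(unsigned int window)
{
	sched_ravg_window = window;
	sched_use_pelt = (sched_ravg_window < MIN_SCHED_RAVG_WINDOW ||
			  sched_ravg_window > MAX_SCHED_RAVG_WINDOW);
}

int main(void)
{
	set_window(0);		/* e.g. a window size of 0 requested at boot */
	printf("window=0        -> sched_use_pelt=%u\n", sched_use_pelt);

	set_window(20000000);	/* 20 ms: valid, switch to window-based stats */
	printf("window=20000000 -> sched_use_pelt=%u\n", sched_use_pelt);
	return 0;
}

Keeping sched_use_pelt initialised to 1 means PELT remains the default; window-based accounting only takes effect when a sane window size is explicitly requested.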

@@ -2545,12 +2545,18 @@ unsigned int __read_mostly sysctl_sched_init_task_load_pct = 100;
 
 static inline unsigned int task_load(struct task_struct *p)
 {
-	return p->se.avg.runnable_avg_sum_scaled;
+	if (sched_use_pelt)
+		return p->se.avg.runnable_avg_sum_scaled;
+
+	return p->ravg.demand;
 }
 
 static inline unsigned int max_task_load(void)
 {
-	return LOAD_AVG_MAX;
+	if (sched_use_pelt)
+		return LOAD_AVG_MAX;
+
+	return sched_ravg_window;
 }
 
 #endif /* CONFIG_SCHED_FREQ_INPUT || CONFIG_SCHED_HMP */

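task_load() and max_task_load() are meant to be read as a pair: the former returns the task's load in whichever unit is currently active, the latter the full-scale value that load saturates at (LOAD_AVG_MAX under PELT, the window length under window-based stats), so a caller that normalises load to a percentage gets comparable numbers under either scheme. The sketch below illustrates that normalisation in user space; it assumes the LOAD_AVG_MAX value PELT used in kernels of this vintage (47742), and pct_load() is a hypothetical helper, not the kernel's pct_task_load() implementation.

#include <stdio.h>

#define LOAD_AVG_MAX 47742			/* PELT full-scale value (assumed) */

static unsigned int sched_use_pelt = 1;
static unsigned int sched_ravg_window = 10000000;	/* 10 ms window */

struct task_demo {
	unsigned int runnable_avg_sum_scaled;	/* PELT-based load */
	unsigned int demand;			/* window-based load, in ns */
};

static unsigned int task_load(struct task_demo *p)
{
	if (sched_use_pelt)
		return p->runnable_avg_sum_scaled;

	return p->demand;
}

static unsigned int max_task_load(void)
{
	if (sched_use_pelt)
		return LOAD_AVG_MAX;

	return sched_ravg_window;
}

/* Hypothetical helper: load as a percentage of the active scheme's full scale. */
static unsigned int pct_load(struct task_demo *p)
{
	return (unsigned int)(((unsigned long long)task_load(p) * 100) /
			      max_task_load());
}

int main(void)
{
	struct task_demo p = {
		.runnable_avg_sum_scaled = 23871,	/* half of LOAD_AVG_MAX */
		.demand = 5000000,			/* half of a 10 ms window */
	};

	printf("PELT:   %u%%\n", pct_load(&p));
	sched_use_pelt = 0;
	printf("window: %u%%\n", pct_load(&p));
	return 0;
}

Using sched_ravg_window as the full-scale value follows from the units: a task that is runnable for an entire window accumulates a demand equal to the window length.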

@@ -930,6 +930,7 @@ extern void init_new_task_load(struct task_struct *p);
 #if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
 
 extern unsigned int sched_ravg_window;
+extern unsigned int sched_use_pelt;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -949,13 +950,21 @@ extern unsigned int sched_init_task_load_windows;
 
 static inline void
 inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
-	rq->cumulative_runnable_avg += p->ravg.demand;
+	if (sched_use_pelt)
+		rq->cumulative_runnable_avg +=
+				p->se.avg.runnable_avg_sum_scaled;
+	else
+		rq->cumulative_runnable_avg += p->ravg.demand;
 }
 
 static inline void
 dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
-	rq->cumulative_runnable_avg -= p->ravg.demand;
+	if (sched_use_pelt)
+		rq->cumulative_runnable_avg -=
+				p->se.avg.runnable_avg_sum_scaled;
+	else
+		rq->cumulative_runnable_avg -= p->ravg.demand;
 	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
 }
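
cumulative_runnable_avg is the running sum of the per-task contributions of the tasks currently accounted on the runqueue, so enqueue and dequeue must add and subtract the same quantity; the BUG_ON() catches an underflow, which would mean the two paths disagreed on the metric. Since sched_use_pelt is settled at boot before any task is enqueued, they always agree in practice. A small user-space model of that invariant, with all names and values illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int sched_use_pelt = 1;

struct task_demo {
	unsigned int runnable_avg_sum_scaled;	/* PELT metric */
	unsigned int demand;			/* window metric */
};

struct rq_demo {
	uint64_t cumulative_runnable_avg;
};

/* Pick whichever metric is active; inc and dec must use the same one. */
static unsigned int task_contrib(struct task_demo *p)
{
	return sched_use_pelt ? p->runnable_avg_sum_scaled : p->demand;
}

static void inc_cumulative_runnable_avg(struct rq_demo *rq, struct task_demo *p)
{
	rq->cumulative_runnable_avg += task_contrib(p);
}

static void dec_cumulative_runnable_avg(struct rq_demo *rq, struct task_demo *p)
{
	rq->cumulative_runnable_avg -= task_contrib(p);
	/* Stands in for the BUG_ON(): underflow means the paths mixed metrics. */
	assert((int64_t)rq->cumulative_runnable_avg >= 0);
}

int main(void)
{
	struct rq_demo rq = { 0 };
	struct task_demo a = { .runnable_avg_sum_scaled = 1000, .demand = 2000000 };
	struct task_demo b = { .runnable_avg_sum_scaled = 3000, .demand = 4000000 };

	inc_cumulative_runnable_avg(&rq, &a);
	inc_cumulative_runnable_avg(&rq, &b);
	dec_cumulative_runnable_avg(&rq, &a);
	dec_cumulative_runnable_avg(&rq, &b);

	printf("final sum = %llu\n", (unsigned long long)rq.cumulative_runnable_avg);
	return 0;
}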