sched: Introduce CONFIG_SCHED_FREQ_INPUT

Introduce a compile-time flag to enable scheduler guidance of
frequency selection. The same flag also turns the window-based
load stats feature on or off.

A compile-time flag lets platforms that do not need this scheduler
feature avoid any overhead it may add.

Change-Id: Id8dec9839f90dcac82f58ef7e2bd0ccd0b6bd16c
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict around
 sysctl_timer_migration.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>

drivers/cpufreq/Kconfig

@@ -25,6 +25,16 @@ config CPU_FREQ_BOOST_SW
	bool
	depends on THERMAL

config SCHED_FREQ_INPUT
	bool "Scheduler inputs to cpufreq governor"
	depends on SMP && FAIR_GROUP_SCHED
	help
	  This option enables support for scheduler based CPU utilization
	  calculations which may then be used by any cpufreq governor. The
	  scheduler keeps track of "recent" cpu demand of tasks, which can
	  help determine need for changing frequency well in advance of what
	  a governor would have been able to detect on its own.

config CPU_FREQ_STAT
	tristate "CPU frequency translation statistics"
	default y
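
The help text describes the intended division of labor: the scheduler
maintains a per-task demand signal and a cpufreq governor consumes it.
As a rough, hypothetical illustration of the governor side (not code
from this patch; freq_for_load() and busy_pct are invented names):

static unsigned int freq_for_load(unsigned int busy_pct,
				  unsigned int max_freq)
{
	/* Frequency proportional to the scheduler-reported load,
	 * plus ~25% headroom so the cpu is not run saturated; the
	 * cpufreq core clamps any request to the policy's limits. */
	unsigned int f = max_freq * busy_pct / 100;

	return f + f / 4;
}

Because demand is tracked per task over recent windows, such a governor
can raise frequency the moment a known-heavy task wakes up, instead of
waiting to sample elevated utilization as a purely timer-driven
governor would.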

include/linux/sched.h

@@ -1424,7 +1424,9 @@ struct task_struct {
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_FREQ_INPUT
	struct ravg ravg;
#endif
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
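
The new per-task ravg field carries the window statistics. Its full
definition is not visible in this view; from the fields the other hunks
touch (mark_start, demand, sum_history[]), a plausible shape is the
following reconstruction (an assumption, including the history depth,
not the patch's actual definition):

struct ravg {
	u64 mark_start;		/* timestamp of the last accounting point */
	u64 sum;		/* runtime in the current window (assumed) */
	u64 demand;		/* load estimate reported toward cpufreq */
	u32 sum_history[5];	/* recent completed windows (depth assumed) */
};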

kernel/sched/core.c

@@ -1746,6 +1746,8 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
	wq_worker_waking_up(p, cpu_of(rq));
}

#ifdef CONFIG_SCHED_FREQ_INPUT

/* Window size (in ns) */
__read_mostly unsigned int sched_ravg_window = 10000000;

@@ -1891,6 +1893,8 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
	p->ravg.mark_start = wallclock;
}

#endif /* CONFIG_SCHED_FREQ_INPUT */

/*
 * Mark the task runnable and perform wakeup-preemption.
 */

@@ -7569,6 +7573,7 @@ void __init sched_init_smp(void)
#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_FREQ_INPUT

/*
 * Maximum possible frequency across all cpus. Task demand and cpu
 * capacity (cpu_power) metrics are scaled in reference to it.

@@ -7655,6 +7660,8 @@ static int register_sched_callback(void)
 */
core_initcall(register_sched_callback);

#endif /* CONFIG_SCHED_FREQ_INPUT */

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||

@@ -7792,11 +7799,13 @@ void __init sched_init(void)
		rq->online = 0;
		rq->idle_stamp = 0;
		rq->avg_idle = 2*sysctl_sched_migration_cost;
#ifdef CONFIG_SCHED_FREQ_INPUT
		rq->cur_freq = 1;
		rq->max_freq = 1;
		rq->min_freq = 1;
		rq->max_possible_freq = 1;
		rq->cumulative_runnable_avg = 0;
#endif
		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
		rq->cstate = 0;
		rq->wakeup_latency = 0;
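
The hunks above add the accounting machinery itself: a 10 ms window
(sched_ravg_window) and update_task_ravg(), whose body is mostly elided
in this view. A self-contained sketch of the basic rollover idea, under
the simplifying assumptions that the task runs continuously and that
window_start is tracked per task (the real code also handles idle time,
the update_sum flag, and history averaging):

#include <stdint.h>

#define WINDOW_NS 10000000ULL	/* 10 ms, matching sched_ravg_window */

struct ravg_sketch {
	uint64_t window_start;	/* start of the open window (assumed) */
	uint64_t mark_start;	/* last accounting timestamp */
	uint64_t sum;		/* runtime accrued in the open window */
	uint64_t demand;	/* last completed window's runtime */
};

/* Fold runtime since mark_start into fixed windows (sketch only). */
static void account_runtime(struct ravg_sketch *r, uint64_t now)
{
	while (now - r->window_start >= WINDOW_NS) {
		uint64_t wend = r->window_start + WINDOW_NS;

		/* Close the current window at its boundary. */
		r->sum += wend - r->mark_start;
		r->demand = r->sum;	/* simplest history policy */
		r->sum = 0;
		r->mark_start = wend;
		r->window_start = wend;
	}
	/* The remainder accrues to the still-open window. */
	r->sum += now - r->mark_start;
	r->mark_start = now;
}

The sysctl_sched_window_stats_policy knob registered in kernel/sysctl.c
below presumably selects among richer history policies (for example,
most recent window versus an average over sum_history[]) than the
single-window assignment used in this sketch.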

kernel/sched/fair.c

@@ -2894,6 +2894,8 @@ static inline int idle_balance(struct rq *rq)
#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_FREQ_INPUT

static inline unsigned int task_load(struct task_struct *p)
{
	return p->ravg.demand;

@@ -2927,6 +2929,8 @@ void init_new_task_load(struct task_struct *p)
		p->ravg.sum_history[i] = 0;
}

#endif /* CONFIG_SCHED_FREQ_INPUT */

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
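
task_load() returns the raw windowed demand in nanoseconds. The
pct_task_load() helper referenced in sched.h below (its body is not
shown in this view) presumably normalizes that against the window size;
a sketch of such a normalization, with an invented name:

/* Sketch only: demand expressed as a percentage of the window.
 * div64_u64() (linux/math64.h) keeps the 64-bit division safe on
 * 32-bit architectures. */
static inline unsigned int load_pct(u64 demand_ns, u64 window_ns)
{
	return (unsigned int)div64_u64(demand_ns * 100, window_ns);
}

A task that ran 2.5 ms of a 10 ms window would report 25, a scale a
governor can map directly onto its frequency table.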

kernel/sched/sched.h

@@ -643,12 +643,14 @@ struct rq {
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_SCHED_FREQ_INPUT
	/*
	 * max_freq = user or thermal defined maximum
	 * max_possible_freq = maximum supported by hardware
	 */
	unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
	u64 cumulative_runnable_avg;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;

@@ -915,6 +917,8 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_SCHED_FREQ_INPUT

extern unsigned int sched_ravg_window;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;

@@ -934,6 +938,24 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
}

#else /* CONFIG_SCHED_FREQ_INPUT */

static inline int pct_task_load(struct task_struct *p) { return 0; }

static inline void
inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
}

static inline void
dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
}

static inline void init_new_task_load(struct task_struct *p) { }

#endif /* CONFIG_SCHED_FREQ_INPUT */

#ifdef CONFIG_CGROUP_SCHED

/*

@@ -1267,8 +1289,15 @@ struct sched_class {
#endif
};

#ifdef CONFIG_SCHED_FREQ_INPUT
extern void
update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum);
#else /* CONFIG_SCHED_FREQ_INPUT */
static inline void
update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
{
}
#endif /* CONFIG_SCHED_FREQ_INPUT */

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
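
The struct rq comment distinguishes max_freq (the user- or
thermal-limited ceiling) from max_possible_freq (the hardware maximum
across all cpus, which the core.c comment above says demand and
capacity metrics are scaled against). The point of carrying these per
runqueue is frequency invariance: runtime observed at a low clock
understates real demand. A sketch of that scaling, with an invented
helper name (the patch's actual scaling code is not shown in this
view):

/* Sketch: normalize observed runtime to the fastest possible
 * clock so demand is comparable across cpus and frequencies.
 * E.g. 10 ms of runtime at half the top clock counts as 5 ms
 * of full-speed demand. */
static inline u64 scale_exec_time(u64 delta_ns, unsigned int cur_freq,
				  unsigned int max_possible_freq)
{
	return div64_u64(delta_ns * cur_freq, max_possible_freq);
}

cumulative_runnable_avg, maintained by the inc/dec helpers above, then
accumulates the demand of all runnable tasks on a cpu, which is the
aggregate a governor would actually key off; the BUG_ON guards against
that sum underflowing on dequeue.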

kernel/sysctl.c

@@ -292,6 +292,7 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_SCHED_FREQ_INPUT
	{
		.procname	= "sched_window_stats_policy",
		.data		= &sysctl_sched_window_stats_policy,

@@ -306,6 +307,7 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#ifdef CONFIG_SCHED_DEBUG
	{
		.procname	= "sched_min_granularity_ns",
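
Entries in kern_table surface under /proc/sys/kernel/, so with the
option enabled the policy knob is reachable as
/proc/sys/kernel/sched_window_stats_policy, readable and writable
(mode 0644) as a plain integer via proc_dointvec. The second guarded
entry is cut off by the hunk boundary, so its name is not visible here.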