sched/fair: add tunable to set initial task load
The choice of initial task load upon fork has a large influence on CPU
and OPP selection when scheduler-driven DVFS is in use. Make this
tunable by adding a new sysctl "sched_initial_task_util".

If the sched governor is not used, the default remains at
SCHED_LOAD_SCALE. Otherwise, the value from the sysctl is used. This
defaults to 0.

Signed-off-by: Todd Kjos <tkjos@google.com>
commit c50cc2299c
parent 4a5e890ec6
3 changed files with 12 additions and 1 deletion
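For context, entries in kern_table surface under /proc/sys/kernel/, so the new knob can be set from userspace like any other sched_* sysctl. A minimal usage sketch (illustrative, not part of the patch; the value 128, roughly 1/8 of SCHED_LOAD_SCALE, is an arbitrary example):

/*
 * Write an initial task util of 128 to the new sysctl. Requires the
 * patched kernel; run as root.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_initial_task_util", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%u\n", 128U);
	return fclose(f) ? 1 : 0;
}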
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -41,6 +41,7 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_is_big_little;
 extern unsigned int sysctl_sched_sync_hint_enable;
+extern unsigned int sysctl_sched_initial_task_util;
 extern unsigned int sysctl_sched_cstate_aware;
 
 enum sched_tunable_scaling {
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -53,6 +53,7 @@ unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
 unsigned int sysctl_sched_is_big_little = 0;
 unsigned int sysctl_sched_sync_hint_enable = 1;
+unsigned int sysctl_sched_initial_task_util = 0;
 unsigned int sysctl_sched_cstate_aware = 1;
 
 /*
@@ -687,7 +688,9 @@ void init_entity_runnable_average(struct sched_entity *se)
 	sa->period_contrib = 1023;
 	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+	sa->util_avg = sched_freq() ?
+		sysctl_sched_initial_task_util :
+		scale_load_down(SCHED_LOAD_SCALE);
 	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
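To make the new initialization concrete, here is a standalone userspace sketch of the arithmetic above, assuming the common configuration where scale_load_down() is a no-op, SCHED_LOAD_SCALE is 1024 and LOAD_AVG_MAX is 47742. With the sysctl at its default of 0 and the sched governor active, a forked task starts with zero utilization, so fork does not by itself push DVFS to a higher OPP:

/*
 * Illustrative recomputation of the fork-time util values; the
 * sched_freq_enabled flag stands in for the kernel's sched_freq() check.
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define LOAD_AVG_MAX		47742UL

static unsigned int sysctl_sched_initial_task_util;	/* defaults to 0 */
static int sched_freq_enabled = 1;	/* sched governor in use */

int main(void)
{
	unsigned long util_avg = sched_freq_enabled ?
		sysctl_sched_initial_task_util : SCHED_LOAD_SCALE;
	unsigned long util_sum = util_avg * LOAD_AVG_MAX;

	/* default case: 0/0, a new fork adds no utilization to its cfs_rq */
	printf("util_avg=%lu util_sum=%lu\n", util_avg, util_sum);
	return 0;
}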
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -317,6 +317,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sched_initial_task_util",
+		.data		= &sysctl_sched_initial_task_util,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "sched_cstate_aware",
 		.data		= &sysctl_sched_cstate_aware,
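One note on this entry: proc_dointvec performs no range checking, so values well above SCHED_LOAD_SCALE would be accepted as written. If clamping were wanted, the usual pattern is proc_dointvec_minmax with .extra1/.extra2 bounds; a sketch of such a variant (the min_task_util/max_task_util variables here are hypothetical, not part of the patch):

static int min_task_util;		/* 0 */
static int max_task_util = 1024;	/* SCHED_LOAD_SCALE at default resolution */

	{
		.procname	= "sched_initial_task_util",
		.data		= &sysctl_sched_initial_task_util,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_task_util,
		.extra2		= &max_task_util,
	},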