sched: EAS: take cstate into account when selecting idle core

Introduce a new sysctl for this option, 'sched_cstate_aware'.
When this is enabled, select_idle_sibling in CFS is modified to
choose the idle CPU in the sibling group which has the lowest
idle state index - idle state indexes are assumed to increase
as sleep depth and hence wakeup latency increase. In this way,
we attempt to minimise wakeup latency when an idle CPU is
required.

Signed-off-by: Srinath Sridharan <srinathsr@google.com>

Includes:
sched: EAS: fix select_idle_sibling

When sysctl_sched_cstate_aware is enabled, the best_idle cpu will never be
chosen in the original flow, because the code jumps straight to the done label.

Bug: 30107557
Change-Id: Ie09c2e3960cafbb976f8d472747faefab3b4d6ac
Signed-off-by: martin_liu <martin_liu@htc.com>
Authored by Srinath Sridharan on 2016-07-14 09:57:29 +01:00; committed by John Stultz
commit 2e9abbc942
parent d753e92e19
3 changed files with 50 additions and 13 deletions
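
To make the policy concrete before the diff itself, here is a minimal sketch of the selection rule the commit message describes. pick_shallowest_idle_cpu() is a hypothetical helper, not part of the patch: it keeps the allowed idle CPU whose idle-state index is lowest (i.e. the cheapest one to wake), assuming higher indexes mean deeper sleep.

/*
 * Illustrative only -- not part of the patch. Walk the candidate CPUs the
 * task may run on and remember the idle CPU whose idle-state index is
 * lowest, on the assumption that a larger index means deeper sleep and
 * therefore higher wakeup latency.
 */
static int pick_shallowest_idle_cpu(struct task_struct *p,
				    const struct cpumask *candidates)
{
	int cpu, best_cpu = -1, best_idx = INT_MAX;

	for_each_cpu_and(cpu, candidates, tsk_cpus_allowed(p)) {
		int idx;

		if (!idle_cpu(cpu))
			continue;

		idx = idle_get_state_idx(cpu_rq(cpu));
		if (idx < best_idx) {
			best_idx = idx;
			best_cpu = cpu;
		}
	}

	return best_cpu;	/* -1 if no allowed CPU in the group was idle */
}

The real change below folds this walk into select_idle_sibling() and additionally requires that the boosted task utilisation fits within the CPU's original capacity.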

include/linux/sched/sysctl.h

@@ -39,6 +39,7 @@ extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_cstate_aware;
 
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,

kernel/sched/fair.c

@@ -51,6 +51,7 @@
 unsigned int sysctl_sched_latency = 6000000ULL;
 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
+unsigned int sysctl_sched_cstate_aware = 1;
 /*
  * The initial- and re-scaling of tunables is configurable
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -5468,15 +5469,20 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	struct sched_domain *sd;
 	struct sched_group *sg;
 	int i = task_cpu(p);
+	int best_idle = -1;
+	int best_idle_cstate = -1;
+	int best_idle_capacity = INT_MAX;
 
-	if (idle_cpu(target))
-		return target;
+	if (!sysctl_sched_cstate_aware) {
+		if (idle_cpu(target))
+			return target;
 
-	/*
-	 * If the prevous cpu is cache affine and idle, don't be stupid.
-	 */
-	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
-		return i;
+		/*
+		 * If the prevous cpu is cache affine and idle, don't be stupid.
+		 */
+		if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+			return i;
+	}
 
 	/*
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
@@ -5489,18 +5495,41 @@ static int select_idle_sibling(struct task_struct *p, int target)
 						tsk_cpus_allowed(p)))
 				goto next;
 
-			for_each_cpu(i, sched_group_cpus(sg)) {
-				if (i == target || !idle_cpu(i))
-					goto next;
-			}
-
-			target = cpumask_first_and(sched_group_cpus(sg),
-					tsk_cpus_allowed(p));
-			goto done;
+			if (sysctl_sched_cstate_aware) {
+				for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+					struct rq *rq = cpu_rq(i);
+					int idle_idx = idle_get_state_idx(rq);
+					unsigned long new_usage = boosted_task_util(p);
+					unsigned long capacity_orig = capacity_orig_of(i);
+
+					if (new_usage > capacity_orig || !idle_cpu(i))
+						goto next;
+
+					if (i == target && new_usage <= capacity_curr_of(target))
+						return target;
+
+					if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
+						best_idle = i;
+						best_idle_cstate = idle_idx;
+						best_idle_capacity = capacity_orig;
+					}
+				}
+			} else {
+				for_each_cpu(i, sched_group_cpus(sg)) {
+					if (i == target || !idle_cpu(i))
+						goto next;
+				}
+
+				target = cpumask_first_and(sched_group_cpus(sg),
+						tsk_cpus_allowed(p));
+				goto done;
+			}
 next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
+	if (best_idle > 0)
+		target = best_idle;
 done:
 	return target;
 }
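
The loop above relies on idle_get_state_idx() returning the index of the cpuidle state the CPU is currently sitting in. Its definition is not part of this diff; in EAS kernels of this era it is roughly the accessor sketched below, with rq->idle_state_idx assumed to be a cached value that grows with sleep depth.

/*
 * Assumed shape of the accessor used above -- not part of this diff.
 * The runqueue is taken to cache the index of the cpuidle state the CPU
 * entered, with a larger index meaning a deeper, slower-to-wake state.
 */
static inline int idle_get_state_idx(struct rq *rq)
{
	return rq->idle_state_idx;
}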

kernel/sysctl.c

@@ -303,6 +303,13 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &min_sched_granularity_ns,
 		.extra2		= &max_sched_granularity_ns,
 	},
+	{
+		.procname	= "sched_cstate_aware",
+		.data		= &sysctl_sched_cstate_aware,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "sched_wakeup_granularity_ns",
 		.data		= &sysctl_sched_wakeup_granularity,
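
Because the entry is added to kern_table with mode 0644 and proc_dointvec, the knob surfaces as /proc/sys/kernel/sched_cstate_aware and can be flipped at runtime. A small userspace sketch (not part of the patch) that disables it:

/* Userspace sketch -- not part of the patch. The path follows from the
 * ctl_table entry above: kern_table entries appear under /proc/sys/kernel/.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_cstate_aware", "w");

	if (!f) {
		perror("sched_cstate_aware");
		return 1;
	}
	fputs("0\n", f);	/* 0: legacy behaviour, 1 (default): c-state aware */
	return fclose(f) ? 1 : 0;
}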