sched: Add over-utilization/tipping point indicator

Energy-aware scheduling is only meant to be active while the system is
_not_ over-utilized. That is, there are spare cycles available to shift
tasks around based on their actual utilization to get a more
energy-efficient task distribution without depriving any tasks. When
above the tipping point, task placement is done the traditional way based
on load_avg, spreading the tasks across as many cpus as possible based
on priority-scaled load to preserve smp_nice. Below the tipping point we
want to use util_avg instead. We need to define a criterion for when we
make the switch.

The util_avg for each cpu converges towards 100% (1024) regardless of
how many additional tasks we may put on it. If we define
over-utilized as:

sum_{cpus}(rq.cfs.avg.util_avg) + margin > sum_{cpus}(rq.capacity)
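To make that concrete, the system-wide check could be sketched as below
(illustration only, not part of this patch; the function name and the
margin parameter are made up for the example):

  /* Sketch only: system-wide version of the over-utilization test. */
  static bool system_overutilized(unsigned long margin)
  {
          unsigned long util_sum = 0, capacity_sum = 0;
          int cpu;

          for_each_online_cpu(cpu) {
                  util_sum += cpu_rq(cpu)->cfs.avg.util_avg;
                  capacity_sum += cpu_rq(cpu)->cpu_capacity;
          }

          /* sum_{cpus}(util_avg) + margin > sum_{cpus}(capacity) */
          return util_sum + margin > capacity_sum;
  }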

some individual cpus may be over-utilized while running multiple tasks
even when the above condition is false. That should be okay as long as
we try to spread the tasks out to avoid per-cpu over-utilization as much
as possible, and as long as all tasks have the _same_ priority. If the
latter isn't true, we have to consider priority to preserve smp_nice.

For example, we could have n_cpus nice=-10 util_avg=55% tasks and
n_cpus/2 nice=0 util_avg=60% tasks. Balancing based on util_avg, we are
likely to end up with nice=-10 tasks sharing cpus and nice=0 tasks
getting their own, as we have 1.5*n_cpus tasks in total and 55%+55% is
less over-utilized than 55%+60% for those cpus that have to be shared.
The system utilization is only 85% of the system capacity, but we are
breaking smp_nice.
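
Working the example through for the whole system:

  total util     = n_cpus*55% + (n_cpus/2)*60%
                 = n_cpus*(55% + 30%)
                 = n_cpus*85%
  total capacity = n_cpus*100%

so overall utilization is 85% of capacity, while the cpus that have to
be shared run at 55%+55% = 110% (preferred over 55%+60% = 115% by a pure
util_avg balance), which is how the nice=-10 tasks end up doubled up.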

To be sure not to break smp_nice, we have defined over-utilization
conservatively as when any cpu in the system is fully utilized at its
highest frequency instead:

cpu_rq(any).cfs.avg.util_avg + margin > cpu_rq(any).capacity

IOW, as soon as one cpu is (nearly) 100% utilized, we switch to load_avg
to factor in priority and preserve smp_nice.
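
The cpu_overutilized() helper this relies on is only forward-declared in
the fair.c hunks below, not spelled out; a minimal sketch of the per-cpu
test following the condition above, with the margin value chosen purely
for illustration, could look like:

  /* Sketch only: a cpu is over-utilized when its utilization plus some
   * headroom exceeds its capacity. The margin here is an assumed tunable. */
  static bool cpu_overutilized(int cpu)
  {
          unsigned long margin = 128; /* assumed: ~12.5% of 1024 */

          return cpu_rq(cpu)->cfs.avg.util_avg + margin >
                 cpu_rq(cpu)->cpu_capacity;
  }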

With this definition, we can skip periodic load-balance as no cpu has an
always-running task when the system is not over-utilized. All tasks will
be periodic and we can balance them at wake-up. This conservative
condition does, however, mean that some scenarios that could benefit
from energy-aware decisions even when one cpu is fully utilized will not
get those benefits.

For systems where some cpus might have reduced capacity (RT pressure
and/or big.LITTLE), we want periodic load-balance checks as soon as just
a single cpu is fully utilized, as it might be one of those with reduced
capacity and in that case we want to migrate load away from it.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Author:    Morten Rasmussen <morten.rasmussen@arm.com>
Date:      2015-05-09 16:49:57 +01:00
Committer: Leo Yan
Commit:    1b5ec5d8ab (parent: 2c6a8a48a7)
2 changed files, 28 insertions(+), 6 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4144,6 +4144,8 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
+static bool cpu_overutilized(int cpu);
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -4154,6 +4156,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
+        int task_new = !(flags & ENQUEUE_WAKEUP);
 
         for_each_sched_entity(se) {
                 if (se->on_rq)
@@ -4185,9 +4188,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 update_cfs_shares(cfs_rq);
         }
 
-        if (!se)
+        if (!se) {
                 add_nr_running(rq, 1);
 
+                if (!task_new && !rq->rd->overutilized &&
+                    cpu_overutilized(rq->cpu))
+                        rq->rd->overutilized = true;
+        }
         hrtick_update(rq);
 }
 
@@ -6651,11 +6657,12 @@ group_type group_classify(struct sched_group *group,
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
  * @overload: Indicate more than one runnable task for any CPU.
+ * @overutilized: Indicate overutilization for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                         struct sched_group *group, int load_idx,
                         int local_group, struct sg_lb_stats *sgs,
-                        bool *overload)
+                        bool *overload, bool *overutilized)
 {
         unsigned long load;
         int i;
@@ -6685,6 +6692,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                 sgs->sum_weighted_load += weighted_cpuload(i);
                 if (idle_cpu(i))
                         sgs->idle_cpus++;
+
+                if (cpu_overutilized(i))
+                        *overutilized = true;
         }
 
         /* Adjust by relative CPU capacity of the group */
@@ -6790,7 +6800,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
         struct sched_group *sg = env->sd->groups;
         struct sg_lb_stats tmp_sgs;
         int load_idx, prefer_sibling = 0;
-        bool overload = false;
+        bool overload = false, overutilized = false;
 
         if (child && child->flags & SD_PREFER_SIBLING)
                 prefer_sibling = 1;
@@ -6812,7 +6822,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
                 }
 
                 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
-                                                &overload);
+                                                &overload, &overutilized);
 
                 if (local_group)
                         goto next_group;
@@ -6856,8 +6866,14 @@ next_group:
                 /* update overload indicator if we are at root domain */
                 if (env->dst_rq->rd->overload != overload)
                         env->dst_rq->rd->overload = overload;
-        }
 
+                /* Update over-utilization (tipping point, U >= 0) indicator */
+                if (env->dst_rq->rd->overutilized != overutilized)
+                        env->dst_rq->rd->overutilized = overutilized;
+        } else {
+                if (!env->dst_rq->rd->overutilized && overutilized)
+                        env->dst_rq->rd->overutilized = true;
+        }
 }
 
 /**
@@ -8250,6 +8266,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
         if (static_branch_unlikely(&sched_numa_balancing))
                 task_tick_numa(rq, curr);
 
+        if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
+                rq->rd->overutilized = true;
 }
 
 /*

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -528,6 +528,9 @@ struct root_domain {
         /* Indicate more than one runnable task for any CPU */
         bool overload;
 
+        /* Indicate one or more cpus over-utilized (tipping point) */
+        bool overutilized;
+
         /*
          * The bit corresponding to a CPU gets set here if such CPU has more
          * than one runnable -deadline task (as it is below for RT tasks).