sched/fair: Update signals of nohz cpus if we are going idle
Stale cpu utilization signals can cause havoc for energy-aware systems, and they arise because no updates are performed for cpus which have no tick running. There is open debate about the correct time to update these cpus, and general recognition that something needs to be done. This is an attempt to do something useful.

When we are looking for a task to pull for a newly-idle cpu, we have an opportunity to update the stats of any cpu which has no tick running, without causing too much disturbance to the system or waking it up.

Change-Id: I0280104ea9c53e56c26f1c56a62bacab5d3e951b
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Brendan Jackman <brendan.jackman@arm.com>
parent bf6cd4d156
commit 7b63e1ff52
1 changed file with 38 additions and 6 deletions
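For orientation before the diff: the hook this patch adds sits at the bottom of the newly-idle balance path. A minimal sketch of that call chain as it looks in kernels of this era (function names from kernel/sched/fair.c; this is context, not part of the patch):

/*
 * A cpu that is about to go idle first tries to pull work:
 *
 *   pick_next_task_fair()              no runnable CFS task found
 *     -> idle_balance()                env->idle = CPU_NEWLY_IDLE
 *       -> load_balance()
 *         -> find_busiest_group()
 *           -> update_sd_lb_stats()
 *             -> update_sg_lb_stats()  <- this patch hooks in here
 *                  -> update_cpu_stats_if_tickless(rq)
 */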
kernel/sched/fair.c
@@ -8051,6 +8051,38 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * idle load balancing data
+ * - used by the nohz balance, but we want it available here
+ *   so that we can see which CPUs have no tick.
+ */
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+} nohz ____cacheline_aligned;
+
+static inline void update_cpu_stats_if_tickless(struct rq *rq)
+{
+	/* only called from update_sg_lb_stats when irqs are disabled */
+	if (cpumask_test_cpu(rq->cpu, nohz.idle_cpus_mask)) {
+		/* rate limit updates to once-per-jiffie at most */
+		if (READ_ONCE(jiffies) <= rq->last_load_update_tick)
+			return;
+
+		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+		update_idle_cpu_load(rq);
+		update_cfs_rq_load_avg(rq->clock_task, &rq->cfs, false);
+		raw_spin_unlock(&rq->lock);
+	}
+}
+
+#else
+static inline void update_cpu_stats_if_tickless(struct rq *rq) { }
+#endif
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
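The rate limit in update_cpu_stats_if_tickless() above is what keeps this cheap: at most one remote update per cpu per jiffy. A standalone userspace analogue of the skip-unless-time-advanced pattern (names like tick and last_update_tick are illustrative, not kernel API):

#include <stdio.h>

static unsigned long tick;              /* stands in for jiffies */
static unsigned long last_update_tick;  /* stands in for rq->last_load_update_tick */

static int update_if_stale(void)
{
	/* mirror the kernel check: bail out unless time moved forward */
	if (tick <= last_update_tick)
		return 0;               /* rate-limited: nothing to do */
	last_update_tick = tick;        /* the real stats update would go here */
	return 1;
}

int main(void)
{
	for (tick = 1; tick <= 3; tick++) {
		printf("tick %lu: updated=%d\n", tick, update_if_stale());
		printf("tick %lu: updated=%d (second call, same tick)\n",
		       tick, update_if_stale());
	}
	return 0;
}

Run it and each tick produces exactly one update; the second call within the same tick is skipped, just as repeated update_sg_lb_stats() passes within one jiffy skip the locked update in the patch.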
@@ -8074,6 +8106,12 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		/* if we are entering idle and there are CPUs with
+		 * their tick stopped, do an update for them
+		 */
+		if (env->idle == CPU_NEWLY_IDLE)
+			update_cpu_stats_if_tickless(rq);
+
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
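The CPU_NEWLY_IDLE check above means the remote update runs only on the newly-idle balance path; periodic and nohz-idle rebalance passes are untouched. For reference, the idle types load_balance() can be invoked with, as defined in include/linux/sched.h in kernels of this vintage (reproduced as an assumption about the tree this patch targets; the descriptive comments are ours):

enum cpu_idle_type {
	CPU_IDLE,		/* periodic balance on an idle cpu */
	CPU_NOT_IDLE,		/* periodic balance on a busy cpu */
	CPU_NEWLY_IDLE,		/* balance as the cpu is about to go idle */
	CPU_MAX_IDLE_TYPES
};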
@@ -9314,12 +9352,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-} nohz ____cacheline_aligned;
-
 static inline int find_new_ilb(void)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);