CHROMIUM: sched: update the average of nr_running
Doing an exponential moving average per nr_running++/-- does not
guarantee a fixed sample rate, which induces errors if there are lots
of threads being enqueued/dequeued from the rq (Linpack mt). Instead of
keeping track of the avg, the scheduler now keeps track of the integral
of nr_running and allows the readers to perform filtering on top.

Original-author: Sai Charan Gurrappadi <sgurrappadi@nvidia.com>
Change-Id: Id946654f32fa8be0eaf9d8fa7c9a8039b5ef9fab
Signed-off-by: Joseph Lo <josephl@nvidia.com>
Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/174694
Reviewed-on: https://chromium-review.googlesource.com/272853
[jstultz: fwdported to 4.4]
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent a727f6b626, commit 75f2b9bac8
3 changed files with 80 additions and 2 deletions
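The commit message leaves the filtering to the readers of nr_running_integral(). As a rough illustration of what such a reader could do (this helper is not part of the change; the name, the sampling scheme, and the timestamps are assumptions, and FSHIFT is the kernel's load-average fixed-point shift of 11), two snapshots of the integral plus the corresponding timestamps are enough to recover an average task count over a window:

#define FSHIFT 11	/* kernel fixed-point shift; FIXED_1 == 1 << FSHIFT */

/*
 * Hypothetical reader-side helper, shown for illustration only.
 * nr_running_integral() grows by nr_running << FSHIFT per nanosecond of
 * rq->clock_task (see NR_AVE_SCALE() in the sched.h hunk below), so the
 * delta of the integral divided by the elapsed time is the average
 * nr_running in FSHIFT fixed point; shift it back down, with rounding,
 * to get a plain task count.
 */
static unsigned int avg_nr_running(unsigned long long integral_now,
				   unsigned long long integral_prev,
				   unsigned long long now_ns,
				   unsigned long long prev_ns)
{
	unsigned long long dt = now_ns - prev_ns;

	if (!dt)
		return 0;

	return (unsigned int)((integral_now - integral_prev +
			       (dt << (FSHIFT - 1))) / (dt << FSHIFT));
}

Because the integral is only advanced when nr_running changes (and is brought up to date on read), the resulting average is independent of how many enqueue/dequeue events fall inside the window, which is the point of the change.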
include/linux/sched.h
@@ -173,6 +173,9 @@ extern bool single_task_running(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+#ifdef CONFIG_CPU_QUIET
+extern u64 nr_running_integral(unsigned int cpu);
+#endif
 
 extern void calc_global_load(unsigned long ticks);
 
kernel/sched/core.c
@@ -2768,6 +2768,36 @@ unsigned long nr_iowait_cpu(int cpu)
 	return atomic_read(&this->nr_iowait);
 }
 
+#ifdef CONFIG_CPU_QUIET
+u64 nr_running_integral(unsigned int cpu)
+{
+	unsigned int seqcnt;
+	u64 integral;
+	struct rq *q;
+
+	if (cpu >= nr_cpu_ids)
+		return 0;
+
+	q = cpu_rq(cpu);
+
+	/*
+	 * Update average to avoid reading stalled value if there were
+	 * no run-queue changes for a long time. On the other hand if
+	 * the changes are happening right now, just read current value
+	 * directly.
+	 */
+
+	seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+	integral = do_nr_running_integral(q);
+	if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+		read_seqcount_begin(&q->ave_seqcnt);
+		integral = q->nr_running_integral;
+	}
+
+	return integral;
+}
+#endif
+
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
 	struct rq *rq = this_rq();
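The read side above pairs with the write-side sections that the kernel/sched/sched.h hunk below adds around add_nr_running()/sub_nr_running(); both sides use the kernel's seqcount API (read_seqcount_begin()/read_seqcount_retry() against write_seqcount_begin()/write_seqcount_end()). For comparison only, a minimal sketch of the canonical lockless read loop over the same fields would retry until the snapshot is consistent:

	unsigned int seq;
	u64 integral;

	do {
		/* Re-read until no writer touched ave_seqcnt meanwhile. */
		seq = read_seqcount_begin(&q->ave_seqcnt);
		integral = do_nr_running_integral(q);
	} while (read_seqcount_retry(&q->ave_seqcnt, seq));

nr_running_integral() above instead gives up after one inconsistent read and falls back to the integral stored by the racing update, trading a marginally older value (it misses only the time since that last run-queue change) for a bounded read.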
kernel/sched/sched.h
@@ -594,6 +594,14 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
+
+#ifdef CONFIG_CPU_QUIET
+	/* time-based average load */
+	u64 nr_last_stamp;
+	u64 nr_running_integral;
+	seqcount_t ave_seqcnt;
+#endif
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1353,7 +1361,7 @@ extern void init_entity_runnable_average(struct sched_entity *se);
 
 extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 
-static inline void add_nr_running(struct rq *rq, unsigned count)
+static inline void __add_nr_running(struct rq *rq, unsigned count)
 {
 	unsigned prev_nr = rq->nr_running;
 
@@ -1381,11 +1389,48 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	}
 }
 
-static inline void sub_nr_running(struct rq *rq, unsigned count)
+static inline void __sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
 }
 
+#ifdef CONFIG_CPU_QUIET
+#define NR_AVE_SCALE(x)	((x) << FSHIFT)
+static inline u64 do_nr_running_integral(struct rq *rq)
+{
+	s64 nr, deltax;
+	u64 nr_running_integral = rq->nr_running_integral;
+
+	deltax = rq->clock_task - rq->nr_last_stamp;
+	nr = NR_AVE_SCALE(rq->nr_running);
+
+	nr_running_integral += nr * deltax;
+
+	return nr_running_integral;
+}
+
+static inline void add_nr_running(struct rq *rq, unsigned count)
+{
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->nr_running_integral = do_nr_running_integral(rq);
+	rq->nr_last_stamp = rq->clock_task;
+	__add_nr_running(rq, count);
+	write_seqcount_end(&rq->ave_seqcnt);
+}
+
+static inline void sub_nr_running(struct rq *rq, unsigned count)
+{
+	write_seqcount_begin(&rq->ave_seqcnt);
+	rq->nr_running_integral = do_nr_running_integral(rq);
+	rq->nr_last_stamp = rq->clock_task;
+	__sub_nr_running(rq, count);
+	write_seqcount_end(&rq->ave_seqcnt);
+}
+#else
+#define add_nr_running __add_nr_running
+#define sub_nr_running __sub_nr_running
+#endif
+
 static inline void rq_last_tick_reset(struct rq *rq)
 {
 #ifdef CONFIG_NO_HZ_FULL