sched: fix potential deflated frequency estimation during IRQ handling
The time between the idle task's mark_start and IRQ handler entry is a period during which the CPU cycle counter is stalled, so it is inappropriate to include that duration in the sample period used for frequency estimation. Fix this suboptimality by replenishing the idle task's CPU cycle counter upon IRQ entry and using irqtime as the time delta.

Change-Id: I274d5047a50565cfaaa2fb821ece21c8cf4c991d
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
parent 6e8c9ac98d
commit 96818d6f1d
3 changed files with 35 additions and 1 deletion
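For illustration only (not part of the patch): a minimal, self-contained C sketch of the deflation described above, assuming a cycle counter that stops advancing while the CPU idles. All numbers are made up, and the KHz interpretation mirrors the NSEC_PER_MSEC scaling visible in the first hunk below.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical numbers: the CPU idles for 800 us (cycle counter
         * stalled), then services an IRQ for 200 us at 1.5 GHz.
         */
        uint64_t idle_ns = 800000;      /* counter does not advance here */
        uint64_t irq_ns  = 200000;      /* counter advances here */
        uint64_t cycles  = 1500000000ULL * irq_ns / 1000000000ULL;

        /* Old behaviour: sample period includes the idle stall.
         * The * 1000000 mirrors the cycles * NSEC_PER_MSEC scaling in the
         * patch, so cycles / time_ns reads as KHz.
         */
        uint64_t deflated_khz = cycles * 1000000 / (idle_ns + irq_ns);

        /* Fixed behaviour: sample period is irqtime only. */
        uint64_t correct_khz = cycles * 1000000 / irq_ns;

        printf("deflated estimate: %llu KHz\n",
               (unsigned long long)deflated_khz);  /* ~300000 KHz */
        printf("correct estimate:  %llu KHz\n",
               (unsigned long long)correct_khz);   /* ~1500000 KHz */
        return 0;
}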
@@ -2730,7 +2730,20 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	else
 		rq->cc.cycles = cur_cycles - p->cpu_cycles;
 	rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
-	rq->cc.time = wallclock - p->ravg.mark_start;
+
+	if (event == IRQ_UPDATE && is_idle_task(p))
+		/*
+		 * Time between mark_start of idle task and IRQ handler
+		 * entry time is CPU cycle counter stall period.
+		 * Upon IRQ handler entry sched_account_irqstart()
+		 * replenishes idle task's cpu cycle counter so
+		 * rq->cc.cycles now represents increased cycles during
+		 * IRQ handler rather than time between idle entry and
+		 * IRQ exit. Thus use irqtime as time delta.
+		 */
+		rq->cc.time = irqtime;
+	else
+		rq->cc.time = wallclock - p->ravg.mark_start;
 	BUG_ON((s64)rq->cc.time < 0);
 }
 
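The hunk above pre-scales the cycle delta by NSEC_PER_MSEC before pairing it with a nanosecond time delta. A hedged sketch of how that pair would presumably be consumed; the helper name is hypothetical and the KHz interpretation is an assumption, since the actual consumer is not part of this diff:

#include <linux/math64.h>	/* div64_u64() */
#include <linux/types.h>

/* cycles_scaled = raw cycle delta * NSEC_PER_MSEC, time_ns = sample period.
 * Cycles per millisecond is numerically a frequency in KHz.
 */
static inline u64 sketch_cycles_to_freq_khz(u64 cycles_scaled, u64 time_ns)
{
	return time_ns ? div64_u64(cycles_scaled, time_ns) : 0;
}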
@@ -3003,6 +3016,17 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	if (is_idle_task(curr) && use_cycle_counter)
+		update_task_cpu_cycles(curr, cpu);
+}
+
 static void reset_task_stats(struct task_struct *p)
 {
 	u32 sum = 0;
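sched_account_irqstart() defers the actual replenishment to update_task_cpu_cycles(), which is not part of this diff. A plausible sketch of that step, assuming a platform-provided per-CPU cycle-counter read; the callback name is hypothetical:

/* Hypothetical sketch, not the patch's implementation: re-snapshot the
 * per-CPU cycle counter into the task so the next delta
 * (cur_cycles - p->cpu_cycles) covers only the IRQ handler.
 */
static void sketch_update_task_cpu_cycles(struct task_struct *p, int cpu)
{
	if (use_cycle_counter)
		p->cpu_cycles = read_cpu_cycle_counter(cpu); /* assumed hook */
}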
@@ -80,6 +80,8 @@ void irqtime_account_irq(struct task_struct *curr)
 
 	if (account)
 		sched_account_irqtime(cpu, curr, delta, wallclock);
+	else if (curr != this_cpu_ksoftirqd())
+		sched_account_irqstart(cpu, curr, wallclock);
 
 	local_irq_restore(flags);
 }
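Putting the hunks together, the intended ordering over one IRQ that interrupts the idle task is presumably as follows; the entry/exit mapping of the account flag is inferred from the commit message, not shown in this diff:

/*
 * Presumed sequence (inferred):
 *
 *   IRQ entry: irqtime_account_irq() -> sched_account_irqstart()
 *              -> update_task_cpu_cycles(idle)   re-snapshot the counter
 *   ... handler runs, cycle counter advances ...
 *   IRQ exit:  irqtime_account_irq() -> sched_account_irqtime(delta)
 *              -> window update with event == IRQ_UPDATE, where
 *                 rq->cc.time = irqtime covers only the handler
 */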
@@ -1077,6 +1077,9 @@ extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+
 unsigned int cpu_temp(int cpu);
 int sched_set_group_id(struct task_struct *p, unsigned int group_id);
 extern unsigned int nr_eligible_big_tasks(int cpu);
@@ -1323,6 +1326,11 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
 {
 }
 
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
 static inline int sched_cpu_high_irqload(int cpu) { return 0; }
 
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }