diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 607e2bf0c75f..ab519d8d3422 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2730,7 +2730,20 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	else
 		rq->cc.cycles = cur_cycles - p->cpu_cycles;
 	rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
-	rq->cc.time = wallclock - p->ravg.mark_start;
+
+	if (event == IRQ_UPDATE && is_idle_task(p))
+		/*
+		 * Time between mark_start of idle task and IRQ handler
+		 * entry time is CPU cycle counter stall period.
+		 * Upon IRQ handler entry sched_account_irqstart()
+		 * replenishes idle task's cpu cycle counter so
+		 * rq->cc.cycles now represents increased cycles during
+		 * IRQ handler rather than time between idle entry and
+		 * IRQ exit. Thus use irqtime as time delta.
+		 */
+		rq->cc.time = irqtime;
+	else
+		rq->cc.time = wallclock - p->ravg.mark_start;
 
 	BUG_ON((s64)rq->cc.time < 0);
 }
@@ -3003,6 +3016,17 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	if (is_idle_task(curr) && use_cycle_counter)
+		update_task_cpu_cycles(curr, cpu);
+}
+
 static void reset_task_stats(struct task_struct *p)
 {
 	u32 sum = 0;
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 930d3ce4f34e..647f184f8aec 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -80,6 +80,8 @@ void irqtime_account_irq(struct task_struct *curr)
 
 	if (account)
 		sched_account_irqtime(cpu, curr, delta, wallclock);
+	else if (curr != this_cpu_ksoftirqd())
+		sched_account_irqstart(cpu, curr, wallclock);
 
 	local_irq_restore(flags);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7c5fd1ca78bc..ff2161cc9fc0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1077,6 +1077,9 @@ extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+
 unsigned int cpu_temp(int cpu);
 int sched_set_group_id(struct task_struct *p, unsigned int group_id);
 extern unsigned int nr_eligible_big_tasks(int cpu);
@@ -1323,6 +1326,11 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
 {
 }
 
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
 static inline int sched_cpu_high_irqload(int cpu) { return 0; }
 
 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
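
For illustration only, below is a minimal user-space sketch of the time-delta
selection that the first hunk adds to update_task_rq_cpu_cycles(): when an IRQ
interrupts the idle task, the (just-replenished) cycle delta only covers the
handler, so it is paired with irqtime rather than with the full window since
mark_start. The helper pick_cc_time(), struct cc_sample and the event constants
are hypothetical stand-ins for this sketch, not kernel code.

/* Standalone sketch of the patch's rq->cc.time selection (assumed names). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IRQ_UPDATE	1	/* stand-in for the kernel's IRQ_UPDATE event  */
#define TASK_UPDATE	2	/* stand-in for an ordinary task update event  */

struct cc_sample {
	uint64_t cycles;	/* scaled cycle delta, as built by the patch   */
	uint64_t time;		/* matching wall-time delta, in nanoseconds    */
};

/*
 * While the CPU idles, its cycle counter is effectively stalled; the patch
 * replenishes it at IRQ entry via sched_account_irqstart().  The cycle delta
 * therefore covers only the IRQ handler, so pairing it with
 * (wallclock - mark_start) would mix handler cycles with idle time.
 * Use the IRQ time as the matching time delta instead.
 */
static uint64_t pick_cc_time(int event, bool curr_is_idle,
			     uint64_t wallclock, uint64_t mark_start,
			     uint64_t irqtime)
{
	if (event == IRQ_UPDATE && curr_is_idle)
		return irqtime;
	return wallclock - mark_start;
}

int main(void)
{
	struct cc_sample cc;

	/* Idle CPU interrupted: only the IRQ handler time is accounted. */
	cc.time = pick_cc_time(IRQ_UPDATE, true, 5000000, 1000000, 150000);
	assert(cc.time == 150000);

	/* Ordinary task update: full window since mark_start. */
	cc.time = pick_cc_time(TASK_UPDATE, false, 5000000, 1000000, 150000);
	assert(cc.time == 4000000);

	printf("cc.time selection behaves as expected\n");
	return 0;
}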