sched: window-stats: Code cleanup

add_task_demand() and the 'long_sleep' calculation in it are not
strictly required. rq_freq_margin() checks whether a frequency change
is needed, which removes the need for the long_sleep calculation. Once
that is gone, the need for add_task_demand() vanishes as well.
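
For reference, a simplified sketch of what remains in update_task_ravg()
after this cleanup (an excerpt of the hunks below; the surrounding
window-rollover logic is omitted):

	/* Ensure partial_demand/demand are reflected in the rq sums. */
	if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
		rq->curr_runnable_sum += p->ravg.partial_demand;
		p->ravg.flags |= CURR_WINDOW_CONTRIB;
	}
	if (!(p->ravg.flags & PREV_WINDOW_CONTRIB)) {
		rq->prev_runnable_sum += p->ravg.demand;
		p->ravg.flags |= PREV_WINDOW_CONTRIB;
	}

Callers simply drop the long_sleep argument, for example:

	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);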

Change-Id: I936540c06072eb8238fc18754aba88789ee3c9f5
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict in core.c]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Author:    Srivatsa Vaddagiri, 2014-08-08 18:14:54 +05:30
Committer: David Keitel
parent 9425ce4309
commit 90a01bb623

@@ -1434,29 +1434,8 @@ static inline u64 scale_exec_time(u64 delta, struct rq *rq)
 	return delta;
 }
 
-/*
- * We depend on task's partial_demand to be always represented in
- * rq->curr_runnable_sum and its demand to be represented in
- * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to run
- * (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with sched_account_wait_time ==
- * 0, ensure this dependency is met.
- */
-static inline int add_task_demand(int event, struct task_struct *p,
-				struct rq *rq, int *long_sleep)
-{
-	if ((p->ravg.flags & CURR_WINDOW_CONTRIB) &&
-	    (p->ravg.flags & PREV_WINDOW_CONTRIB))
-		return 0;
-
-	if (long_sleep && (rq->window_start > p->ravg.mark_start &&
-	    rq->window_start - p->ravg.mark_start > sched_ravg_window))
-		*long_sleep = 1;
-
-	return 1;
-}
-
 static void update_task_ravg(struct task_struct *p, struct rq *rq,
-			int event, u64 wallclock, int *long_sleep, u64 irqtime)
+			int event, u64 wallclock, u64 irqtime)
 {
 	u32 window_size = sched_ravg_window;
 	int update_sum, new_window;
@@ -1571,7 +1550,14 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
 		mark_start = window_start;
 	} while (new_window);
 
-	if (add_task_demand(event, p, rq, long_sleep)) {
+	/*
+	 * We depend on task's partial_demand to be always represented in
+	 * rq->curr_runnable_sum and its demand to be represented in
+	 * rq->prev_runnable_sum. When task wakes up (TASK_WAKE) or is picked to
+	 * run (PICK_NEXT_TASK) or migrated (TASK_MIGRATE) with
+	 * sched_account_wait_time == 0, ensure this dependency is met.
+	 */
 	if (!(p->ravg.flags & CURR_WINDOW_CONTRIB)) {
 		rq->curr_runnable_sum += p->ravg.partial_demand;
 		p->ravg.flags |= CURR_WINDOW_CONTRIB;
@@ -1581,7 +1567,6 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
 		rq->prev_runnable_sum += p->ravg.demand;
 		p->ravg.flags |= PREV_WINDOW_CONTRIB;
 	}
-	}
 
 done:
 	trace_sched_update_task_ravg(p, rq, event, wallclock);
@@ -1599,7 +1584,7 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, NULL, delta);
+	update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, delta);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1644,7 +1629,7 @@ static inline void mark_task_starting(struct task_struct *p)
 		return;
 	}
 
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	p->ravg.mark_start = wallclock;
 	rq->prev_runnable_sum += p->ravg.demand;
 	rq->curr_runnable_sum += p->ravg.partial_demand;
@@ -1697,7 +1682,7 @@ unsigned long sched_get_busy(int cpu)
 	 * that the window stats are current by doing an update.
 	 */
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu),
@@ -1960,7 +1945,7 @@ static int cpufreq_notifier_trans(struct notifier_block *nb,
 	BUG_ON(!new_freq);
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
 	cpu_rq(cpu)->cur_freq = new_freq;
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
@@ -2013,9 +1998,9 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 
 	update_task_ravg(task_rq(p)->curr, task_rq(p),
 			 TASK_UPDATE,
-			 wallclock, NULL, 0);
+			 wallclock, 0);
 	update_task_ravg(dest_rq->curr, dest_rq,
-			 TASK_UPDATE, wallclock, NULL, 0);
+			 TASK_UPDATE, wallclock, 0);
 
 	/*
 	 * In case of migration of task on runqueue, on_rq =1,
@@ -2032,7 +2017,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 	}
 
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
-			 wallclock, NULL, 0);
+			 wallclock, 0);
 
 	/*
 	 * Remove task's load from rq as its now migrating to
@@ -2087,7 +2072,7 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 
 static inline void
 update_task_ravg(struct task_struct *p, struct rq *rq,
-		 int event, u64 wallclock, int *long_sleep, u64 irqtime)
+		 int event, u64 wallclock, u64 irqtime)
 {
 }
 
@@ -2994,6 +2979,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 }
 
 __read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;
+
 /**
  * try_to_wake_up - wake up a thread
  * @p: the thread to be awakened
@@ -3016,7 +3002,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	int cpu, src_cpu, success = 0;
 #ifdef CONFIG_SMP
 	struct rq *rq;
-	int long_sleep = 0;
 	u64 wallclock;
 #endif
 
@@ -3081,8 +3066,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
 	raw_spin_lock(&rq->lock);
 	wallclock = sched_clock();
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
-	update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 	raw_spin_unlock(&rq->lock);
 
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
@@ -3149,7 +3134,6 @@ out:
 static void try_to_wake_up_local(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
-	int long_sleep = 0;
 
 	if (rq != this_rq() || p == current) {
 		printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p,"
@@ -3183,8 +3167,8 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!task_on_rq_queued(p)) {
 		u64 wallclock = sched_clock();
 
-		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, NULL, 0);
-		update_task_ravg(p, rq, TASK_WAKE, wallclock, &long_sleep, 0);
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 	}
 
@@ -4030,7 +4014,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_cpu_load_active(rq);
 	calc_global_load_tick(rq);
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), NULL, 0);
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
 	raw_spin_unlock(&rq->lock);
 
 	perf_event_task_tick();
@@ -4331,8 +4315,8 @@ static void __sched notrace __schedule(bool preempt)
 	next = pick_next_task(rq, prev);
 	wallclock = sched_clock();
 
-	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, NULL, 0);
-	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, NULL, 0);
+	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;