sched: window-stats: Account idle time as busy time

Provide a knob to consider idle time as busy time when the CPU becomes
idle as a result of an io_schedule() call. This will let the governor
parameter 'io_is_busy' be appropriately honored.

Change-Id: Id9fb4fe448e8e4909696aa8a3be5a165ad7529d3
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
This commit is contained in:
Srivatsa Vaddagiri 2014-07-30 00:22:26 -07:00 committed by David Keitel
parent 900b44b621
commit c20a41478d
2 changed files with 40 additions and 8 deletions

View file

@@ -2114,6 +2114,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_set_io_is_busy(int val);
/*
* Per process flags

View file

@@ -1195,6 +1195,7 @@ unsigned int min_possible_efficiency = 1024;
__read_mostly int sysctl_sched_freq_inc_notify_slack_pct;
__read_mostly int sysctl_sched_freq_dec_notify_slack_pct = 25;
static __read_mostly unsigned int sched_account_wait_time = 1;
static __read_mostly unsigned int sched_io_is_busy;
/*
* Maximum possible frequency across all cpus. Task demand and cpu
@@ -1360,6 +1361,14 @@ static int __init set_sched_ravg_window(char *str)
early_param("sched_ravg_window", set_sched_ravg_window);
/*
 * Return non-zero when @rq has tasks blocked on I/O (rq->nr_iowait),
 * i.e. the cpu went idle via io_schedule(); 0 when the sched_io_is_busy
 * knob is disabled. Callers use this to keep treating an otherwise-idle
 * cpu as busy so the governor's 'io_is_busy' setting is honored.
 */
static inline int cpu_is_waiting_on_io(struct rq *rq)
{
/* Knob off: never report iowait as busy. */
if (!sched_io_is_busy)
return 0;
return atomic_read(&rq->nr_iowait);
}
static inline void
move_window_start(struct rq *rq, u64 wallclock, int update_sum,
struct task_struct *p)
@@ -1375,7 +1384,7 @@ move_window_start(struct rq *rq, u64 wallclock, int update_sum,
nr_windows = div64_u64(delta, sched_ravg_window);
rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
if (is_idle_task(rq->curr)) {
if (is_idle_task(rq->curr) && !cpu_is_waiting_on_io(rq)) {
if (nr_windows == 1)
rq->prev_runnable_sum = rq->curr_runnable_sum;
else
@@ -1431,6 +1440,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
int update_sum, new_window;
u64 mark_start = p->ravg.mark_start;
u64 window_start;
s64 delta = 0;
if (sched_use_pelt || !rq->window_start)
return;
@@ -1444,15 +1454,30 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
move_window_start(rq, wallclock, update_sum, p);
window_start = rq->window_start;
/*
* Don't bother accounting for idle task, also we would not want
* to attribute its time to the aggregate RQ busy time
*/
if (is_idle_task(p))
return;
if (is_idle_task(p)) {
if (!(event == PUT_PREV_TASK && cpu_is_waiting_on_io(rq)))
goto done;
if (window_start > mark_start) {
delta = window_start - mark_start;
if (delta > window_size) {
rq->curr_runnable_sum = 0;
delta = window_size;
}
delta = scale_exec_time(delta, rq);
rq->curr_runnable_sum += delta;
rq->prev_runnable_sum = rq->curr_runnable_sum;
rq->curr_runnable_sum = 0;
mark_start = window_start;
}
delta = wallclock - mark_start;
delta = scale_exec_time(delta, rq);
rq->curr_runnable_sum += delta;
goto done;
}
do {
s64 delta = 0;
int nr_full_windows = 0;
u64 now = wallclock;
u32 sum = 0;
@@ -1532,6 +1557,7 @@ static void update_task_ravg(struct task_struct *p, struct rq *rq,
}
}
done:
trace_sched_update_task_ravg(p, rq, event, wallclock);
p->ravg.mark_start = wallclock;
@@ -1689,6 +1715,11 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size,
}
}
/*
 * Set the sched_io_is_busy knob (declared extern in sched.h so the
 * cpufreq governor can call it). Non-zero means iowait-idle time is
 * accounted as busy time in the window statistics.
 * NOTE(review): plain (non-atomic) store — readers may observe the
 * update racily; presumably acceptable for a tuning knob, confirm.
 */
void sched_set_io_is_busy(int val)
{
sched_io_is_busy = val;
}
int sched_set_window(u64 window_start, unsigned int window_size)
{
u64 ws, now;