From add97fe0da92778d694c69c2354446196f6e0f9e Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Fri, 6 Jan 2017 09:57:42 +0530
Subject: [PATCH] sched: fix a bug in handling top task table rollover

When frequency aggregation is enabled, there is a possibility of rolling
over the top task table multiple times in a single window. For example:

- utra() is called with PUT_PREV_TASK for task 'A', which does not belong
  to any related thread group. Let's say a window rollover happens here:
  the rq counters and the top task table are rolled over.

- utra() is then called with PICK_NEXT_TASK/TASK_WAKE for task 'B', which
  belongs to a related thread group. Let's say this happens before the
  group's cpu_time->window_start is in sync with rq->window_start. In this
  case the group's cpu_time counters are rolled over and the top task
  table is rolled over again.

Fix this by rolling over the top task table only in the context of the
current running task.

Change-Id: Iea3075e0ea460a9279a01ba42725890c46edd713
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/hmp.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 180e2fcf785b..5631bbfbc254 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -2227,6 +2227,27 @@ static inline void clear_top_tasks_table(u8 *table)
 	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
 }
 
+static void rollover_top_tasks(struct rq *rq, bool full_window)
+{
+	u8 curr_table = rq->curr_table;
+	u8 prev_table = 1 - curr_table;
+	int curr_top = rq->curr_top;
+
+	clear_top_tasks_table(rq->top_tasks[prev_table]);
+	clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
+
+	if (full_window) {
+		curr_top = 0;
+		clear_top_tasks_table(rq->top_tasks[curr_table]);
+		clear_top_tasks_bitmap(
+				rq->top_tasks_bitmap[curr_table]);
+	}
+
+	rq->curr_table = prev_table;
+	rq->prev_top = curr_top;
+	rq->curr_top = 0;
+}
+
 static u32 empty_windows[NR_CPUS];
 
 static void rollover_task_window(struct task_struct *p, bool full_window)
@@ -2344,29 +2365,18 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	if (flip_counters) {
 		u64 curr_sum = *curr_runnable_sum;
 		u64 nt_curr_sum = *nt_curr_runnable_sum;
-		u8 curr_table = rq->curr_table;
-		u8 prev_table = 1 - curr_table;
-		int curr_top = rq->curr_top;
 
-		clear_top_tasks_table(rq->top_tasks[prev_table]);
-		clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
-
-		if (prev_sum_reset) {
+		if (prev_sum_reset)
 			curr_sum = nt_curr_sum = 0;
-			curr_top = 0;
-			clear_top_tasks_table(rq->top_tasks[curr_table]);
-			clear_top_tasks_bitmap(
-					rq->top_tasks_bitmap[curr_table]);
-		}
 
 		*prev_runnable_sum = curr_sum;
 		*nt_prev_runnable_sum = nt_curr_sum;
 
 		*curr_runnable_sum = 0;
 		*nt_curr_runnable_sum = 0;
-		rq->curr_table = prev_table;
-		rq->prev_top = curr_top;
-		rq->curr_top = 0;
+
+		if (p_is_curr_task)
+			rollover_top_tasks(rq, full_window);
 	}
 
 	if (!account_busy_for_cpu_time(rq, p, irqtime, event))
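
Note (illustration only, not part of the patch above): the stand-alone sketch
below is a minimal model of the double-buffered top task tables. The names
demo_rq and demo_rollover are hypothetical simplifications of rq->top_tasks,
rq->curr_table, rq->prev_top and rollover_top_tasks(); the sketch only shows
why a second rollover within the same window wipes the data that the first
rollover just preserved for the previous window, which is what gating the
rollover on p_is_curr_task avoids.

/*
 * Illustrative sketch only: a stripped-down model of the double-buffered
 * top-task tables. All names are hypothetical simplifications of the
 * rq->top_tasks / rq->curr_table / rq->prev_top machinery in hmp.c.
 */
#include <stdio.h>
#include <string.h>

#define NUM_LOAD_INDICES 8	/* tiny table just for the demo */

struct demo_rq {
	unsigned char top_tasks[2][NUM_LOAD_INDICES];
	unsigned char curr_table;	/* index of the current window's table */
	int curr_top;
	int prev_top;
};

/* Mirrors the shape of rollover_top_tasks(): flip tables, clear the new one */
static void demo_rollover(struct demo_rq *rq, int full_window)
{
	unsigned char curr = rq->curr_table;
	unsigned char prev = 1 - curr;
	int curr_top = rq->curr_top;

	/* the table that becomes "current" is wiped for the new window */
	memset(rq->top_tasks[prev], 0, sizeof(rq->top_tasks[prev]));

	if (full_window) {
		curr_top = 0;
		memset(rq->top_tasks[curr], 0, sizeof(rq->top_tasks[curr]));
	}

	rq->curr_table = prev;
	rq->prev_top = curr_top;
	rq->curr_top = 0;
}

int main(void)
{
	struct demo_rq rq = { .curr_table = 0, .curr_top = 3 };

	rq.top_tasks[0][3] = 1;	/* pretend load index 3 was this window's top task */

	/* First rollover at the window boundary: index 3 becomes prev_top. */
	demo_rollover(&rq, 0);
	printf("after one rollover:  prev_top=%d\n", rq.prev_top);

	/*
	 * A second rollover in the same window (the situation the patch
	 * prevents) flips the tables again and wipes what was just preserved.
	 */
	demo_rollover(&rq, 0);
	printf("after two rollovers: prev_top=%d\n", rq.prev_top);

	return 0;
}

Built with any C compiler, this prints prev_top=3 after the first rollover and
prev_top=0 after the second, i.e. the previous window's top task information
is lost when the rollover runs more than once per window.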