sched: fix a bug in handling top task table rollover
When frequency aggregation is enabled, there is a possibility of rolling over the top task table multiple times in a single window. For example:

- utra() is called with PUT_PREV_TASK for task 'A', which does not belong to any related thread grp. Let's say a window rollover happens: the rq counters and the top task table are rolled over.

- utra() is then called with PICK_NEXT_TASK/TASK_WAKE for task 'B', which belongs to a related thread grp. Let's say this happens before the grp's cpu_time->window_start is in sync with rq->window_start. In this case, the grp's cpu_time counters are rolled over and the top task table is rolled over a second time.

Roll over the top task table in the context of the current running task to fix this.

Change-Id: Iea3075e0ea460a9279a01ba42725890c46edd713
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
commit add97fe0da
parent 432662eb4d

1 changed file with 25 additions and 15 deletions
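For illustration, the effect of rolling the table over twice in one window can be modelled with a minimal user-space sketch. This is not the kernel code: toy_rq, toy_rollover_top_tasks and the shrunk NUM_LOAD_INDICES are illustrative stand-ins for the rq top-task tables described above.

#include <stdio.h>
#include <string.h>

#define NUM_LOAD_INDICES 8	/* shrunk for the example */

struct toy_rq {
	unsigned char top_tasks[2][NUM_LOAD_INDICES];
	unsigned char curr_table;	/* which table belongs to the current window */
};

/* Swap curr/prev tables; the table that becomes "current" is cleared. */
static void toy_rollover_top_tasks(struct toy_rq *rq)
{
	unsigned char prev_table = 1 - rq->curr_table;

	memset(rq->top_tasks[prev_table], 0, NUM_LOAD_INDICES);
	rq->curr_table = prev_table;
}

int main(void)
{
	struct toy_rq rq = { .curr_table = 0 };

	/* Load recorded during the window that is about to end. */
	rq.top_tasks[rq.curr_table][3] = 42;

	toy_rollover_top_tasks(&rq);	/* rq counters roll over */
	toy_rollover_top_tasks(&rq);	/* grp counters roll over again */

	/*
	 * The finished window's entries should now sit in the prev table,
	 * but the second rollover wiped them: prints 0 instead of 42.
	 */
	printf("prev top task entry: %u\n",
	       rq.top_tasks[1 - rq.curr_table][3]);
	return 0;
}

A single rollover leaves the finished window's entries in the prev table; a second rollover in the same window makes that table "current" again and clears it. Performing the rollover only in the context of the current running task, as the patch below does, ensures it happens exactly once per window.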
@@ -2227,6 +2227,27 @@ static inline void clear_top_tasks_table(u8 *table)
 	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
 }
 
+static void rollover_top_tasks(struct rq *rq, bool full_window)
+{
+	u8 curr_table = rq->curr_table;
+	u8 prev_table = 1 - curr_table;
+	int curr_top = rq->curr_top;
+
+	clear_top_tasks_table(rq->top_tasks[prev_table]);
+	clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
+
+	if (full_window) {
+		curr_top = 0;
+		clear_top_tasks_table(rq->top_tasks[curr_table]);
+		clear_top_tasks_bitmap(
+				rq->top_tasks_bitmap[curr_table]);
+	}
+
+	rq->curr_table = prev_table;
+	rq->prev_top = curr_top;
+	rq->curr_top = 0;
+}
+
 static u32 empty_windows[NR_CPUS];
 
 static void rollover_task_window(struct task_struct *p, bool full_window)
@@ -2344,29 +2365,18 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	if (flip_counters) {
 		u64 curr_sum = *curr_runnable_sum;
 		u64 nt_curr_sum = *nt_curr_runnable_sum;
-		u8 curr_table = rq->curr_table;
-		u8 prev_table = 1 - curr_table;
-		int curr_top = rq->curr_top;
-
-		clear_top_tasks_table(rq->top_tasks[prev_table]);
-		clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
 
-		if (prev_sum_reset) {
+		if (prev_sum_reset)
 			curr_sum = nt_curr_sum = 0;
-			curr_top = 0;
-			clear_top_tasks_table(rq->top_tasks[curr_table]);
-			clear_top_tasks_bitmap(
-				rq->top_tasks_bitmap[curr_table]);
-		}
 
 		*prev_runnable_sum = curr_sum;
 		*nt_prev_runnable_sum = nt_curr_sum;
 
 		*curr_runnable_sum = 0;
 		*nt_curr_runnable_sum = 0;
-		rq->curr_table = prev_table;
-		rq->prev_top = curr_top;
-		rq->curr_top = 0;
+
+		if (p_is_curr_task)
+			rollover_top_tasks(rq, full_window);
 	}
 
 	if (!account_busy_for_cpu_time(rq, p, irqtime, event))