DEBUG: sched: add tracepoint for RD overutilized
Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
parent c5a00c2dad
commit 93db70f21c

2 changed files with 33 additions and 4 deletions
include/trace/events/sched.h

@@ -888,6 +888,26 @@ TRACE_EVENT(sched_tune_filter,
                 __entry->payoff, __entry->region)
 );
 
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+        TP_PROTO(bool overutilized),
+
+        TP_ARGS(overutilized),
+
+        TP_STRUCT__entry(
+                __field( bool, overutilized )
+        ),
+
+        TP_fast_assign(
+                __entry->overutilized = overutilized;
+        ),
+
+        TP_printk("overutilized=%d",
+                __entry->overutilized ? 1 : 0)
+);
 #ifdef CONFIG_SCHED_WALT
 struct rq;
 
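Note: besides showing up in the trace buffer, the event defined above can be consumed from kernel code through the probe-registration stubs that TRACE_EVENT() generates. The module below is only an illustrative sketch, not part of this patch: the module itself and overutilized_probe() are made-up names, and a loadable module would additionally need the tracepoint exported with EXPORT_TRACEPOINT_SYMBOL_GPL().

/*
 * Illustrative only, not part of this patch: attach a probe to the new
 * sched_overutilized tracepoint and log every flag transition it reports.
 */
#include <linux/module.h>
#include <trace/events/sched.h>

/* Probe signature: the void *data cookie first, then the TP_PROTO args. */
static void overutilized_probe(void *data, bool overutilized)
{
        pr_info("root domain overutilized=%d\n", overutilized ? 1 : 0);
}

static int __init ou_probe_init(void)
{
        return register_trace_sched_overutilized(overutilized_probe, NULL);
}

static void __exit ou_probe_exit(void)
{
        unregister_trace_sched_overutilized(overutilized_probe, NULL);
        /* Wait for in-flight probe callers before the module text goes away. */
        tracepoint_synchronize_unregister();
}

module_init(ou_probe_init);
module_exit(ou_probe_exit);
MODULE_LICENSE("GPL");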
kernel/sched/fair.c

@@ -4242,8 +4242,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         if (!se) {
                 walt_inc_cumulative_runnable_avg(rq, p);
                 if (!task_new && !rq->rd->overutilized &&
-                    cpu_overutilized(rq->cpu))
+                    cpu_overutilized(rq->cpu)) {
                         rq->rd->overutilized = true;
+                        trace_sched_overutilized(true);
+                }
 
                 /*
                  * We want to potentially trigger a freq switch
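Both new trace sites in fair.c fire on the same predicate. For context only (not part of this diff), cpu_overutilized() in this EAS tree is essentially a capacity-margin check; the sketch below paraphrases it and leans on fair.c internals (capacity_of(), cpu_util(), capacity_margin), so it is a reference to existing code rather than a standalone example.

/*
 * Paraphrase of the existing predicate, not introduced by this patch:
 * a CPU counts as over-utilized when its utilization, scaled by the
 * tuning margin, no longer fits within its capacity.
 */
static bool cpu_overutilized(int cpu)
{
        return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
}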
@@ -7503,12 +7505,17 @@ next_group:
                 env->dst_rq->rd->overload = overload;
 
                 /* Update over-utilization (tipping point, U >= 0) indicator */
-                if (env->dst_rq->rd->overutilized != overutilized)
+                if (env->dst_rq->rd->overutilized != overutilized) {
                         env->dst_rq->rd->overutilized = overutilized;
+                        trace_sched_overutilized(overutilized);
+                }
         } else {
-                if (!env->dst_rq->rd->overutilized && overutilized)
+                if (!env->dst_rq->rd->overutilized && overutilized) {
                         env->dst_rq->rd->overutilized = true;
+                        trace_sched_overutilized(true);
+                }
         }
+
 }
 
 /**
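The set-the-flag-then-trace sequence now appears in three call sites (enqueue_task_fair(), update_sd_lb_stats() above, task_tick_fair() below). A helper along the following lines is purely hypothetical and not part of this patch, but it shows one way to keep the flag and the tracepoint from drifting apart:

/*
 * Hypothetical refactoring, not part of this patch: update the root
 * domain flag and emit the tracepoint in one place, and only when the
 * value actually changes.
 */
static inline void set_rd_overutilized(struct root_domain *rd, bool overutilized)
{
        if (rd->overutilized == overutilized)
                return;

        rd->overutilized = overutilized;
        trace_sched_overutilized(overutilized);
}

Call sites would then reduce to set_rd_overutilized(env->dst_rq->rd, overutilized), set_rd_overutilized(rq->rd, true), and so on.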
@@ -8948,8 +8955,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                 task_tick_numa(rq, curr);
 
 #ifdef CONFIG_SMP
-        if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
+        if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
                 rq->rd->overutilized = true;
+                trace_sched_overutilized(true);
+        }
 
         rq->misfit_task = !task_fits_max(curr, rq->cpu);
 #endif
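From user space, the new event appears under events/sched/sched_overutilized in tracefs once enabled. The reader below is an illustrative sketch, not part of the patch; it assumes tracefs is mounted at /sys/kernel/tracing (older trees expose it at /sys/kernel/debug/tracing) and that the process has permission to write there.

/*
 * Illustrative user-space reader: enable the new event and stream its
 * records from trace_pipe. Adjust the tracefs mount point as needed.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *enable =
                "/sys/kernel/tracing/events/sched/sched_overutilized/enable";
        FILE *f = fopen(enable, "w");
        char line[512];

        if (!f) {
                perror("enable sched_overutilized");
                return 1;
        }
        fputs("1", f);
        fclose(f);

        /*
         * Each record ends in "overutilized=0" or "overutilized=1",
         * matching the TP_printk() format defined above.
         */
        f = fopen("/sys/kernel/tracing/trace_pipe", "r");
        if (!f) {
                perror("trace_pipe");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "sched_overutilized"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}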