diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e32d4d7903b0..e0f212743c77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3525,7 +3525,7 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
 
 	BUG_ON(stats->nr_big_tasks < 0 ||
 		(s64)stats->cumulative_runnable_avg < 0);
-	verify_pred_demands_sum(stats);
+	BUG_ON((s64)stats->pred_demands_sum < 0);
 }
 
 #else	/* CONFIG_CFS_BANDWIDTH */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index d220482f4dbc..50a6d8e0d4d4 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1387,7 +1387,7 @@ void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
 
-static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
 {
 	stats->nr_big_tasks = 0;
 	if (reset_cra) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 471dc9faab35..4289bf6cd642 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1407,6 +1407,7 @@ extern void inc_rq_hmp_stats(struct rq *rq,
 				 struct task_struct *p, int change_cra);
 extern void dec_rq_hmp_stats(struct rq *rq,
 				 struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
 extern int is_big_task(struct task_struct *p);
 extern int upmigrate_discouraged(struct task_struct *p);
 extern struct sched_cluster *rq_cluster(struct rq *rq);
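
For context, a minimal userspace sketch of the pattern the patch settles on: the open-coded BUG_ON() check that replaces verify_pred_demands_sum(), and the reset_hmp_stats() helper whose definition loses its static qualifier and gains a declaration in sched.h so code outside hmp.c can call it. Only the field names, the check, and the visible reset logic come from the hunks above; the struct layout, the s64 typedef, assert() standing in for BUG_ON(), and the body of the reset_cra branch are illustrative assumptions.

/*
 * Standalone sketch (userspace, not kernel code) of the pattern in the
 * patch above. Field names and the checks come from the hunks; the rest
 * is assumed for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;

struct hmp_sched_stats {
	int nr_big_tasks;
	uint64_t cumulative_runnable_avg;
	uint64_t pred_demands_sum;	/* checked inline after the patch */
};

/* Open-coded sanity checks, as in the fair.c hunk (assert() ~ BUG_ON()). */
static void check_hmp_stats(struct hmp_sched_stats *stats)
{
	assert(!(stats->nr_big_tasks < 0 ||
		 (s64)stats->cumulative_runnable_avg < 0));
	assert(!((s64)stats->pred_demands_sum < 0));
}

/* Non-static in the patch so callers outside hmp.c can reset stats. */
void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
{
	stats->nr_big_tasks = 0;
	if (reset_cra) {
		/* Assumed branch body: zero the running aggregates. */
		stats->cumulative_runnable_avg = 0;
		stats->pred_demands_sum = 0;
	}
}

int main(void)
{
	struct hmp_sched_stats stats = {
		.nr_big_tasks = 2,
		.cumulative_runnable_avg = 1024,
		.pred_demands_sum = 512,
	};

	check_hmp_stats(&stats);
	reset_hmp_stats(&stats, 1);
	printf("after reset: big=%d cra=%llu pred=%llu\n",
	       stats.nr_big_tasks,
	       (unsigned long long)stats.cumulative_runnable_avg,
	       (unsigned long long)stats.pred_demands_sum);
	return 0;
}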