diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9ffcaa5cf41c..fbc5e647c59e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5246,7 +5246,9 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+#ifdef CONFIG_SCHED_HMP
 		dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
+#endif
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
@@ -5254,7 +5256,9 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	if (!se) {
 		sub_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
 		dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+#endif
 	}
 
 	cfs_rq->throttled = 1;
@@ -5319,7 +5323,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+#ifdef CONFIG_SCHED_HMP
 		inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
+#endif
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5327,7 +5333,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	if (!se) {
 		add_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
 		inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+#endif
 	}
 
 	/* determine whether we need to wake up potentially idle cpu */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 85531d648cfb..1e4b2e986b15 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1137,6 +1137,8 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 #else /* CONFIG_SCHED_HMP */
 
+#define sched_use_pelt 0
+
 struct hmp_sched_stats;
 
 static inline u64 scale_load_to_cpu(u64 load, int cpu)
@@ -1289,8 +1291,6 @@ static inline void set_hmp_defaults(void) { }
 
 static inline void clear_reserved(int cpu) { }
 
-#define power_cost(...) 0
-
 #define trace_sched_cpu_load(...)
 #define trace_sched_cpu_load_lb(...)
 #define trace_sched_cpu_load_cgroup(...)
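
An alternative worth noting (not part of the patch above): rather than wrapping every call site in fair.c with #ifdef CONFIG_SCHED_HMP, the existing !CONFIG_SCHED_HMP branch of kernel/sched/sched.h could provide empty static inline stubs, alongside the other stubs already defined there. The sketch below is only an illustration of that approach; it assumes the argument types implied by the call sites in fair.c, and the real HMP prototypes may differ.

#else /* CONFIG_SCHED_HMP */

/* No-op stubs so callers in fair.c build without per-call-site #ifdefs. */
static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
						   struct cfs_rq *cfs_rq) { }

static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
						   struct cfs_rq *cfs_rq) { }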