From f1a15235d6b840e1af7cfc1cf94a60f628da8984 Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Wed, 11 Jan 2017 15:11:23 +0530
Subject: [PATCH 1/2] sched: fix compiler errors with !SCHED_HMP

HMP scheduler boost feature related functions are referenced in the
SMP load balancer. Add nop stubs for them to fix the compiler errors
with !SCHED_HMP.

Change-Id: I1cbcf67f728c2cbc7c0f47e8eaf1f4165649dce8
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/sched.h | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a3abdf19ff4c..af6d02e2f012 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1054,6 +1054,12 @@ static inline void sched_ttwu_pending(void) { }
 #include "stats.h"
 #include "auto_group.h"
 
+enum sched_boost_policy {
+	SCHED_BOOST_NONE,
+	SCHED_BOOST_ON_BIG,
+	SCHED_BOOST_ON_ALL,
+};
+
 #ifdef CONFIG_SCHED_HMP
 
 #define WINDOW_STATS_RECENT 0
@@ -1138,12 +1144,6 @@ extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
 extern void update_avg_burst(struct task_struct *p);
 extern void update_avg(u64 *avg, u64 sample);
 
-enum sched_boost_policy {
-	SCHED_BOOST_NONE,
-	SCHED_BOOST_ON_BIG,
-	SCHED_BOOST_ON_ALL,
-};
-
 #define NO_BOOST 0
 #define FULL_THROTTLE_BOOST 1
 #define CONSERVATIVE_BOOST 2
@@ -1495,6 +1495,16 @@ struct hmp_sched_stats;
 struct related_thread_group;
 struct sched_cluster;
 
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+	return SCHED_BOOST_NONE;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
 static inline int got_boost_kick(void)
 {
 	return 0;

From f6471c2c9d94cf61aa0be049be24f593cfa4f5d6 Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Wed, 11 Jan 2017 15:45:54 +0530
Subject: [PATCH 2/2] sched: Fix compilation errors when CFS_BANDWIDTH && !SCHED_HMP

There are a few compiler errors and warnings when the CFS_BANDWIDTH
config is enabled but SCHED_HMP is not.

Change-Id: Idaf4a7364564b6faf56df2eb3a1a74eeb242d57e
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/fair.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 87538f7d495a..dd7a9b280f6c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3640,15 +3640,8 @@ static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
 static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
	 struct task_struct *p, int change_cra) { }
 
-static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-			struct cfs_rq *cfs_rq)
-{
-}
-
-static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-			struct cfs_rq *cfs_rq)
-{
-}
+#define dec_throttled_cfs_rq_hmp_stats(...)
+#define inc_throttled_cfs_rq_hmp_stats(...)
 
 #endif /* CONFIG_SCHED_HMP */
 
@@ -4676,6 +4669,7 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 	return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
+#ifdef CONFIG_SCHED_HMP
 /*
  * Check if task is part of a hierarchy where some cfs_rq does not have any
  * runtime left.
@@ -4702,6 +4696,7 @@ static int task_will_be_throttled(struct task_struct *p)
 
 	return 0;
 }
+#endif
 
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
@@ -4782,9 +4777,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
-#ifdef CONFIG_SCHED_HMP
 		dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
-#endif
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
@@ -4792,9 +4785,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	if (!se) {
 		sub_nr_running(rq, task_delta);
-#ifdef CONFIG_SCHED_HMP
 		dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
-#endif
 	}
 
 	cfs_rq->throttled = 1;
@@ -4831,7 +4822,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct sched_entity *se;
 	int enqueue = 1;
 	long task_delta;
-	struct cfs_rq *tcfs_rq = cfs_rq;
+	struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4859,9 +4850,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
-#ifdef CONFIG_SCHED_HMP
 		inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
-#endif
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4869,9 +4858,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	if (!se) {
 		add_nr_running(rq, task_delta);
-#ifdef CONFIG_SCHED_HMP
 		inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
-#endif
 	}
 
 	/* determine whether we need to wake up potentially idle cpu */
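
Both patches rely on the same kernel idiom: when a config option is off, provide
no-op stand-ins for its functions so that call sites compile cleanly without a
per-call #ifdef guard. Patch 1 uses static inline stubs; patch 2 converts two
stubs into empty variadic macros, because their argument expressions (e.g.
&qcfs_rq->hmp_stats) touch a struct member that only exists under
CONFIG_SCHED_HMP, and a macro discards its arguments unevaluated. The following
standalone sketch illustrates the idiom under stated assumptions: CONFIG_MYFEATURE,
struct feat_stats, and the function names are hypothetical, not the kernel's
actual API.

/*
 * Minimal sketch of the stub pattern used by both patches.  All names
 * here (CONFIG_MYFEATURE, feat_stats, feat_stats_inc, feat_enabled)
 * are hypothetical, for illustration only.
 */
#include <stdio.h>

/* Stand-in for the kernel's __maybe_unused annotation (compiler.h). */
#define __maybe_unused __attribute__((unused))

struct feat_stats { int nr_tracked; };

#ifdef CONFIG_MYFEATURE

/* Real implementation: updates the feature's bookkeeping. */
static inline void feat_stats_inc(struct feat_stats *stats)
{
	stats->nr_tracked++;
}

static inline int feat_enabled(void)
{
	return 1;
}

#else /* !CONFIG_MYFEATURE */

/*
 * Variant 1 (as in patch 1): a static inline nop stub.  Arguments are
 * still type-checked, and the compiler discards the empty body.
 */
static inline int feat_enabled(void)
{
	return 0;
}

/*
 * Variant 2 (as in patch 2): an empty variadic macro.  The arguments
 * vanish unevaluated, so this also works when an argument expression
 * (like &rq->hmp_stats) only compiles under the config option.
 */
#define feat_stats_inc(...)

#endif /* CONFIG_MYFEATURE */

int main(void)
{
	/*
	 * With the macro stub, 'stats' is never referenced; marking it
	 * __maybe_unused (as patch 2 does for tcfs_rq) silences the
	 * resulting unused-variable warning in the !CONFIG_MYFEATURE build.
	 */
	struct feat_stats stats __maybe_unused = { 0 };

	feat_stats_inc(&stats);	/* no-op unless the feature is configured in */
	printf("feature enabled: %d\n", feat_enabled());
	return 0;
}

Building the sketch both ways exercises both halves: gcc -DCONFIG_MYFEATURE demo.c
picks up the real implementation, while plain gcc demo.c compiles the bookkeeping
call away entirely.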