Merge "sched: Fix compilation errors when CFS_BANDWIDTH && !SCHED_HMP"

Authored by Linux Build Service Account on 2017-01-16 04:29:05 -08:00, committed by Gerrit - the friendly Code Review server
commit a963750b83
2 changed files with 21 additions and 24 deletions

kernel/sched/fair.c (View file)

@@ -3634,15 +3634,8 @@ static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra) { }
static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
struct cfs_rq *cfs_rq)
{
}
static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
struct cfs_rq *cfs_rq)
{
}
#define dec_throttled_cfs_rq_hmp_stats(...)
#define inc_throttled_cfs_rq_hmp_stats(...)
#endif /* CONFIG_SCHED_HMP */
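
The hunk above is the heart of the fix. With CONFIG_SCHED_HMP disabled, the old static-inline stubs still force the compiler to evaluate their arguments, so call sites that pass &cfs_rq->hmp_stats or &rq->hmp_stats fail to build, because the hmp_stats member only exists in the HMP configuration. Variadic macros make the preprocessor discard the argument text before the compiler ever sees it. A minimal standalone sketch of that pattern, using hypothetical names rather than the kernel's:

/*
 * Minimal sketch of the no-op macro stub pattern (hypothetical names,
 * not the kernel code). An inline stub still compiles its arguments,
 * so update_hmp_stats(&rq.hmp_stats) breaks when hmp_stats only exists
 * under CONFIG_SCHED_HMP. A variadic macro drops the argument text in
 * the preprocessor, so the missing member is never seen by the compiler.
 */
#include <stdio.h>

struct rq_stub {
#ifdef CONFIG_SCHED_HMP
	int hmp_stats;			/* only present in the HMP build */
#endif
	int nr_running;
};

#ifdef CONFIG_SCHED_HMP
static inline void update_hmp_stats(int *stats)
{
	(*stats)++;
}
#else
#define update_hmp_stats(...)		/* arguments are never evaluated */
#endif

int main(void)
{
	struct rq_stub rq = { .nr_running = 1 };

	update_hmp_stats(&rq.hmp_stats);	/* compiles in both configurations */
	printf("nr_running=%d\n", rq.nr_running);
	return 0;
}

Because the macro consumes its arguments during preprocessing, the inc/dec_throttled_cfs_rq_hmp_stats() calls further down no longer need guards of their own, which is presumably why the #ifdef CONFIG_SCHED_HMP / #endif pairs around those calls in throttle_cfs_rq() and unthrottle_cfs_rq() are dropped in this change.
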
@@ -4670,6 +4663,7 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
return cfs_bandwidth_used() && cfs_rq->throttled;
}
#ifdef CONFIG_SCHED_HMP
/*
* Check if task is part of a hierarchy where some cfs_rq does not have any
* runtime left.
@@ -4696,6 +4690,7 @@ static int task_will_be_throttled(struct task_struct *p)
return 0;
}
#endif
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
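
These two hunks wrap task_will_be_throttled() in #ifdef CONFIG_SCHED_HMP / #endif. Its callers appear to live in HMP-specific code, so in a !SCHED_HMP build the static function would be defined but never used, which the kernel's warning flags (and -Werror in some configurations) can turn into a build failure. A reduced sketch of that pattern, with made-up names:

/*
 * Sketch only: a static helper whose single caller is itself compiled
 * out must be guarded by the same config option, otherwise the build
 * warns "defined but not used".
 */
#ifdef CONFIG_SCHED_HMP
static int task_hmp_helper(int x)
{
	return x * 2;
}

int hmp_only_caller(int x)
{
	return task_hmp_helper(x);	/* the only call site */
}
#endif
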
@@ -4776,9 +4771,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
#ifdef CONFIG_SCHED_HMP
dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
#endif
if (qcfs_rq->load.weight)
dequeue = 0;
@@ -4786,9 +4779,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (!se) {
sub_nr_running(rq, task_delta);
#ifdef CONFIG_SCHED_HMP
dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
#endif
}
cfs_rq->throttled = 1;
@@ -4825,7 +4816,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct sched_entity *se;
int enqueue = 1;
long task_delta;
struct cfs_rq *tcfs_rq = cfs_rq;
struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
se = cfs_rq->tg->se[cpu_of(rq)];
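
With the stubs now variadic macros, the tcfs_rq local in unthrottle_cfs_rq() is only consumed by inc_throttled_cfs_rq_hmp_stats(), which expands to nothing when SCHED_HMP is off, so the variable would look unused to the compiler in that build. Tagging it __maybe_unused (the kernel's wrapper around the compiler's unused attribute) silences the warning without #ifdef-ing the declaration. A small standalone sketch of the same idea, with invented names:

/*
 * Sketch of the __maybe_unused pattern (hypothetical example). When the
 * only consumer of a local is a macro that expands to nothing, the
 * variable looks unused to the compiler; the attribute keeps -Wunused
 * quiet without duplicating the declaration under #ifdef.
 */
#include <stdio.h>

#define __maybe_unused	__attribute__((unused))	/* kernel compiler headers define this */
#define record_stats(...)			/* no-op stub */

int main(void)
{
	int snapshot __maybe_unused = 42;

	record_stats(snapshot);		/* expands to nothing */
	printf("ok\n");
	return 0;
}
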
@@ -4853,9 +4844,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (enqueue)
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
#ifdef CONFIG_SCHED_HMP
inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
#endif
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4863,9 +4852,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (!se) {
add_nr_running(rq, task_delta);
#ifdef CONFIG_SCHED_HMP
inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
#endif
}
/* determine whether we need to wake up potentially idle cpu */

kernel/sched/sched.h (View file)

@@ -1055,6 +1055,12 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
enum sched_boost_policy {
SCHED_BOOST_NONE,
SCHED_BOOST_ON_BIG,
SCHED_BOOST_ON_ALL,
};
#ifdef CONFIG_SCHED_HMP
#define WINDOW_STATS_RECENT 0
@@ -1139,12 +1145,6 @@ extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
extern void update_avg_burst(struct task_struct *p);
extern void update_avg(u64 *avg, u64 sample);
enum sched_boost_policy {
SCHED_BOOST_NONE,
SCHED_BOOST_ON_BIG,
SCHED_BOOST_ON_ALL,
};
#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
#define CONSERVATIVE_BOOST 2
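
The two sched.h hunks above appear to hoist enum sched_boost_policy out of the CONFIG_SCHED_HMP section: it is added near the top of the header and removed from its old location inside the HMP block, so the type stays visible when SCHED_HMP is off. That matters because the !SCHED_HMP stub added later in this change returns SCHED_BOOST_NONE and could not compile if the enum were hidden behind the #ifdef. A reduced sketch of that dependency (the extern declaration is assumed for illustration, not copied from the header):

/* The enum must be declared unconditionally ... */
enum sched_boost_policy {
	SCHED_BOOST_NONE,
	SCHED_BOOST_ON_BIG,
	SCHED_BOOST_ON_ALL,
};

#ifdef CONFIG_SCHED_HMP
extern enum sched_boost_policy sched_boost_policy(void);	/* real implementation (assumed) */
#else
/*
 * ... because this stub has to name the type and SCHED_BOOST_NONE
 * even when the HMP code is compiled out.
 */
static inline enum sched_boost_policy sched_boost_policy(void)
{
	return SCHED_BOOST_NONE;
}
#endif
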
@@ -1496,6 +1496,16 @@ struct hmp_sched_stats;
struct related_thread_group;
struct sched_cluster;
static inline enum sched_boost_policy sched_boost_policy(void)
{
return SCHED_BOOST_NONE;
}
static inline bool task_sched_boost(struct task_struct *p)
{
return true;
}
static inline int got_boost_kick(void)
{
return 0;