sched: Remove sched_enable_hmp flag

Clean up the code and make it more maintainable by removing the
dependency on the sched_enable_hmp flag. The HMP scheduler cannot be
toggled without recompiling; it is enabled by selecting the
CONFIG_SCHED_HMP config option.

Change-Id: I246c1b1889f8dcbc8f0a0805077c0ce5d4f083b0
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
Olav Haugan 2017-02-01 17:59:51 -08:00
parent af883d4db0
commit 475820b5bc
6 changed files with 38 additions and 77 deletions
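
In short, the patch replaces runtime checks of the sched_enable_hmp variable with compile-time CONFIG_SCHED_HMP guards, so the flag, its sysctl-style checks, and its early_param handler can all be dropped. A minimal sketch of the pattern (illustrative only; pick_cpu_for() is a hypothetical helper, while select_best_cpu() is the HMP placement routine referenced in the diff):

/* Before: a runtime knob, evaluated on every call. */
unsigned int __read_mostly sched_enable_hmp;

static int pick_cpu_for(struct task_struct *p, int prev_cpu, int sync)
{
	if (sched_enable_hmp)
		return select_best_cpu(p, prev_cpu, 0, sync);
	return prev_cpu;	/* fall back to default placement */
}

/* After: decided at build time; the check disappears entirely
 * when CONFIG_SCHED_HMP is not set.
 */
static int pick_cpu_for(struct task_struct *p, int prev_cpu, int sync)
{
#ifdef CONFIG_SCHED_HMP
	return select_best_cpu(p, prev_cpu, 0, sync);
#endif
	return prev_cpu;	/* default placement */
}

When CONFIG_SCHED_HMP is set, the early return makes the rest of the function dead code, which is the same guard style used below in sched_exec(), select_task_rq_fair(), and find_lowest_rq().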

View file

@@ -156,9 +156,6 @@ void sched_boost_parse_dt(void)
 	struct device_node *sn;
 	const char *boost_policy;
 
-	if (!sched_enable_hmp)
-		return;
-
 	sn = of_find_node_by_path("/sched-hmp");
 	if (!sn)
 		return;
@@ -175,9 +172,6 @@ int sched_set_boost(int type)
 {
 	int ret = 0;
 
-	if (!sched_enable_hmp)
-		return -EINVAL;
-
 	mutex_lock(&boost_mutex);
 
 	if (verify_boost_params(sysctl_sched_boost, type))
@@ -197,9 +191,6 @@ int sched_boost_handler(struct ctl_table *table, int write,
 	unsigned int *data = (unsigned int *)table->data;
 	unsigned int old_val;
 
-	if (!sched_enable_hmp)
-		return -EINVAL;
-
 	mutex_lock(&boost_mutex);
 
 	old_val = *data;

View file

@@ -3025,8 +3025,9 @@ void sched_exec(void)
 	unsigned long flags;
 	int dest_cpu, curr_cpu;
 
-	if (sched_enable_hmp)
-		return;
+#ifdef CONFIG_SCHED_HMP
+	return;
+#endif
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	curr_cpu = task_cpu(p);
@@ -8215,8 +8216,9 @@ void __init sched_init(void)
 	int i, j;
 	unsigned long alloc_size = 0, ptr;
 
-	if (sched_enable_hmp)
-		pr_info("HMP scheduling enabled.\n");
+#ifdef CONFIG_SCHED_HMP
+	pr_info("HMP scheduling enabled.\n");
+#endif
 
 	BUG_ON(num_possible_cpus() > BITS_PER_LONG);

View file

@@ -3332,9 +3332,9 @@ void _inc_hmp_sched_stats_fair(struct rq *rq,
 	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
 	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
 	 * efficiency by short-circuiting for_each_sched_entity() loop when
-	 * !sched_enable_hmp || sched_disable_window_stats
+	 * sched_disable_window_stats
 	 */
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	for_each_sched_entity(se) {
@@ -3357,7 +3357,7 @@ _dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
 	struct sched_entity *se = &p->se;
 
 	/* See comment on efficiency in _inc_hmp_sched_stats_fair */
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	for_each_sched_entity(se) {
@@ -3482,8 +3482,7 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 	int nice;
 	struct related_thread_group *grp;
 
-	if (!sched_enable_hmp || p->state != TASK_RUNNING ||
-	    p->nr_cpus_allowed == 1)
+	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
 		return 0;
 
 	/* No need to migrate task that is about to be throttled */
@@ -7024,8 +7023,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
-	if (sched_enable_hmp)
-		return select_best_cpu(p, prev_cpu, 0, sync);
+#ifdef CONFIG_SCHED_HMP
+	return select_best_cpu(p, prev_cpu, 0, sync);
+#endif
 
 	if (sd_flag & SD_BALANCE_WAKE)
 		want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
@@ -9313,8 +9313,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
-	if (sched_enable_hmp)
-		return find_busiest_queue_hmp(env, group);
+#ifdef CONFIG_SCHED_HMP
+	return find_busiest_queue_hmp(env, group);
+#endif
 
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		unsigned long capacity, wl;
@@ -10120,8 +10121,9 @@ static inline int find_new_ilb(int type)
 {
 	int ilb;
 
-	if (sched_enable_hmp)
-		return find_new_hmp_ilb(type);
+#ifdef CONFIG_SCHED_HMP
+	return find_new_hmp_ilb(type);
+#endif
 
 	ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -10496,8 +10498,9 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
 	if (likely(!atomic_read(&nohz.nr_cpus)))
 		return 0;
 
-	if (sched_enable_hmp)
-		return _nohz_kick_needed_hmp(rq, cpu, type);
+#ifdef CONFIG_SCHED_HMP
+	return _nohz_kick_needed_hmp(rq, cpu, type);
+#endif
 
 	if (time_before(now, nohz.next_balance))
 		return 0;

View file

@@ -616,19 +616,6 @@ int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
 	return 0;
 }
 
-int __init set_sched_enable_hmp(char *str)
-{
-	int enable_hmp = 0;
-
-	get_option(&str, &enable_hmp);
-	sched_enable_hmp = !!enable_hmp;
-
-	return 0;
-}
-
-early_param("sched_enable_hmp", set_sched_enable_hmp);
-
 /* Clear any HMP scheduler related requests pending from or on cpu */
 void clear_hmp_request(int cpu)
 {
@@ -870,9 +857,6 @@ unsigned int max_task_load(void)
 	return sched_ravg_window;
 }
 
-/* Use this knob to turn on or off HMP-aware task placement logic */
-unsigned int __read_mostly sched_enable_hmp;
-
 /* A cpu can no longer accommodate more tasks if:
  *
  * rq->nr_running > sysctl_sched_spill_nr_run ||
@@ -1245,7 +1229,7 @@ unlock:
 void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
@@ -1254,7 +1238,7 @@ void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
@@ -1323,7 +1307,7 @@ void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
 	u64 new_task_load;
 	u64 old_task_load;
 
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
@@ -1433,9 +1417,6 @@ int sched_window_update_handler(struct ctl_table *table, int write,
 	unsigned int *data = (unsigned int *)table->data;
 	unsigned int old_val;
 
-	if (!sched_enable_hmp)
-		return -EINVAL;
-
 	mutex_lock(&policy_mutex);
 
 	old_val = *data;
@@ -1471,9 +1452,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
 	unsigned int *data = (unsigned int *)table->data;
 	int update_task_count = 0;
 
-	if (!sched_enable_hmp)
-		return 0;
-
 	/*
 	 * The policy mutex is acquired with cpu_hotplug.lock
 	 * held from cpu_up()->cpufreq_governor_interactive()->
@@ -1776,9 +1754,6 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
 	int rc = 0;
 	u64 group_load = 0, new_load = 0;
 
-	if (!sched_enable_hmp)
-		return 0;
-
 	if (check_pred) {
 		u64 prev = rq->old_busy_time;
 		u64 predicted = rq->hmp_stats.pred_demands_sum;
@@ -3012,7 +2987,7 @@ void set_window_start(struct rq *rq)
 {
 	static int sync_cpu_available;
 
-	if (rq->window_start || !sched_enable_hmp)
+	if (rq->window_start)
 		return;
 
 	if (!sync_cpu_available) {
@@ -3620,7 +3595,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	bool new_task;
 	struct related_thread_group *grp;
 
-	if (!sched_enable_hmp || (!p->on_rq && p->state != TASK_WAKING))
+	if (!p->on_rq && p->state != TASK_WAKING)
 		return;
 
 	if (exiting_task(p)) {
@@ -4418,9 +4393,6 @@ static int register_sched_callback(void)
 {
 	int ret;
 
-	if (!sched_enable_hmp)
-		return 0;
-
 	ret = cpufreq_register_notifier(&notifier_policy_block,
 					CPUFREQ_POLICY_NOTIFIER);

View file

@@ -1406,6 +1406,7 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_SCHED_HMP
 static int
 select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
@@ -1419,6 +1420,7 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
 	return cpu;
 }
+#endif
 
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
@@ -1426,8 +1428,9 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	struct task_struct *curr;
 	struct rq *rq;
 
-	if (sched_enable_hmp)
-		return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#ifdef CONFIG_SCHED_HMP
+	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#endif
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1796,14 +1799,6 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 	return best_cpu;
 }
-
-#else /* CONFIG_SCHED_HMP */
-
-static int find_lowest_rq_hmp(struct task_struct *task)
-{
-	return -1;
-}
-
 #endif /* CONFIG_SCHED_HMP */
 
 static int find_lowest_rq(struct task_struct *task)
@@ -1813,8 +1808,9 @@ static int find_lowest_rq(struct task_struct *task)
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
-	if (sched_enable_hmp)
-		return find_lowest_rq_hmp(task);
+#ifdef CONFIG_SCHED_HMP
+	return find_lowest_rq_hmp(task);
+#endif
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))

View file

@@ -1083,7 +1083,6 @@ enum sched_boost_policy {
 extern struct mutex policy_mutex;
 extern unsigned int sched_ravg_window;
 extern unsigned int sched_disable_window_stats;
-extern unsigned int sched_enable_hmp;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -1127,7 +1126,6 @@ extern void update_cluster_topology(void);
 extern void note_task_waking(struct task_struct *p, u64 wallclock);
 extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
 extern void init_clusters(void);
-extern int __init set_sched_enable_hmp(char *str);
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
@@ -1257,7 +1255,7 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 {
 	u32 task_load;
 
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1272,7 +1270,7 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 {
 	u32 task_load;
 
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1290,7 +1288,7 @@ fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 				struct task_struct *p, s64 task_load_delta,
 				s64 pred_demand_delta)
 {
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	stats->cumulative_runnable_avg += task_load_delta;
@@ -1667,7 +1665,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 static inline void add_new_task_to_grp(struct task_struct *new) {}
 
-#define sched_enable_hmp 0
 #define PRED_DEMAND_DELTA (0)
 
 static inline void