sched: Remove unused migration notifier code.

Migration notifiers were created to help the cpu-boost driver manage
CPU frequencies when tasks migrate from one CPU to another. With the
evolution of scheduler-guided frequency control, the scheduler now
manages load directly when tasks migrate, and the cpu-boost driver no
longer makes use of this information. Remove the unused code
pertaining to this feature.

Change-Id: I3529e4356e15e342a5fcfbcf3654396752a1d7cd
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Syed Rameez Mustafa 2016-07-25 17:10:46 -07:00
parent b855596ec1
commit ef1e55638d
5 changed files with 0 additions and 115 deletions
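For context, the consumer side that this removal obsoletes looked roughly like the sketch below: a driver such as cpu-boost would register a notifier_block on migration_notifier_head and receive a migration_notify_data describing each migration. This is a minimal sketch, not the actual cpu-boost code; the callback name, field types, init hook and pr_debug body are illustrative placeholders, while the atomic-notifier API and the src_cpu/dest_cpu/load fields come from the code removed in this patch.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Exported by the scheduler before this patch (removed below). */
extern struct atomic_notifier_head migration_notifier_head;

/* Payload passed through the chain; fields as used in the removed code. */
struct migration_notify_data {
	int src_cpu;
	int dest_cpu;
	int load;		/* task load as a percentage */
};

/* Hypothetical cpu-boost callback: react to a task moving between CPUs. */
static int boost_migration_notify(struct notifier_block *nb,
				  unsigned long unused, void *data)
{
	struct migration_notify_data *mnd = data;

	pr_debug("task migrated %d -> %d, load %d%%\n",
		 mnd->src_cpu, mnd->dest_cpu, mnd->load);
	/* A real driver would queue work here to raise dest_cpu's freq floor. */
	return NOTIFY_OK;
}

static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};

static int __init boost_notifier_init(void)
{
	return atomic_notifier_chain_register(&migration_notifier_head,
					      &boost_migration_nb);
}
late_initcall(boost_notifier_init);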

include/linux/sched/sysctl.h

@@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_wake_to_idle;
-extern unsigned int sysctl_sched_wakeup_load_threshold;
 extern unsigned int sysctl_sched_window_stats_policy;
 extern unsigned int sysctl_sched_ravg_hist_size;
 extern unsigned int sysctl_sched_cpu_high_irqload;

kernel/sched/core.c

@@ -100,7 +100,6 @@ const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
 const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
 					"RQ_TO_RQ", "GROUP_TO_GROUP"};

-ATOMIC_NOTIFIER_HEAD(migration_notifier_head);
 ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);

 DEFINE_MUTEX(sched_domains_mutex);
@@ -4333,7 +4332,6 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
 			     struct task_struct *p)
 {
-	struct migration_notify_data mnd;
 	bool check_groups;

 	rcu_read_lock();
@@ -4348,14 +4346,6 @@ static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
 	} else {
 		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
 	}
-
-	if (task_notify_on_migrate(p)) {
-		mnd.src_cpu = src_cpu;
-		mnd.dest_cpu = dest_cpu;
-		mnd.load = pct_task_load(p);
-		atomic_notifier_call_chain(&migration_notifier_head, 0,
-					   (void *)&mnd);
-	}
 }

 /*
@@ -5181,8 +5171,6 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 	raw_spin_unlock(&rq->lock);
 }

-__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;
-
 /**
  * try_to_wake_up - wake up a thread
  * @p: the thread to be awakened
@@ -5203,8 +5191,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
 	unsigned long flags;
 	int cpu, src_cpu, success = 0;
-	int notify = 0;
-	struct migration_notify_data mnd;
 #ifdef CONFIG_SMP
 	unsigned int old_load;
 	struct rq *rq;
@@ -5309,31 +5295,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	ttwu_queue(p, cpu);
 stat:
 	ttwu_stat(p, cpu, wake_flags);
-
-	if (task_notify_on_migrate(p)) {
-		mnd.src_cpu = src_cpu;
-		mnd.dest_cpu = cpu;
-		mnd.load = pct_task_load(p);
-
-		/*
-		 * Call the migration notifier with mnd for foreground task
-		 * migrations as well as for wakeups if their load is above
-		 * sysctl_sched_wakeup_load_threshold. This would prompt the
-		 * cpu-boost to boost the CPU frequency on wake up of a heavy
-		 * weight foreground task
-		 */
-		if ((src_cpu != cpu) || (mnd.load >
-					sysctl_sched_wakeup_load_threshold))
-			notify = 1;
-	}
-
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-	if (notify)
-		atomic_notifier_call_chain(&migration_notifier_head,
-					   0, (void *)&mnd);
-
 	if (freq_notif_allowed) {
 		if (!same_freq_domain(src_cpu, cpu)) {
 			check_for_freq_change(cpu_rq(cpu),
@@ -11793,24 +11757,6 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 		sched_move_task(task);
 }

-static u64 cpu_notify_on_migrate_read_u64(struct cgroup_subsys_state *css,
-					  struct cftype *cft)
-{
-	struct task_group *tg = css_tg(css);
-
-	return tg->notify_on_migrate;
-}
-
-static int cpu_notify_on_migrate_write_u64(struct cgroup_subsys_state *css,
-					   struct cftype *cft, u64 notify)
-{
-	struct task_group *tg = css_tg(css);
-
-	tg->notify_on_migrate = (notify > 0);
-
-	return 0;
-}
-
 #ifdef CONFIG_SCHED_HMP
 static u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
@@ -12135,11 +12081,6 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 #endif /* CONFIG_RT_GROUP_SCHED */

 static struct cftype cpu_files[] = {
-	{
-		.name = "notify_on_migrate",
-		.read_u64 = cpu_notify_on_migrate_read_u64,
-		.write_u64 = cpu_notify_on_migrate_write_u64,
-	},
 #ifdef CONFIG_SCHED_HMP
 	{
 		.name = "upmigrate_discourage",

kernel/sched/fair.c

@@ -7579,9 +7579,6 @@ struct lb_env {
 	struct list_head	tasks;
 };

-static DEFINE_PER_CPU(bool, dbs_boost_needed);
-static DEFINE_PER_CPU(int, dbs_boost_load_moved);
-
 /*
  * Is this task likely cache-hot:
  */
@@ -7822,7 +7819,6 @@ static struct task_struct *detach_one_task(struct lb_env *env)
 		 * inside detach_tasks().
 		 */
 		schedstat_inc(env->sd, lb_gained[env->idle]);
-		per_cpu(dbs_boost_load_moved, env->dst_cpu) += pct_task_load(p);
 		return p;
 	}
@@ -7895,7 +7891,6 @@ redo:
 		detached++;
 		env->imbalance -= load;
-		per_cpu(dbs_boost_load_moved, env->dst_cpu) += pct_task_load(p);

 #ifdef CONFIG_PREEMPT
 		/*
@@ -7949,8 +7944,6 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	check_preempt_curr(rq, p, 0);
-	if (task_notify_on_migrate(p))
-		per_cpu(dbs_boost_needed, task_cpu(p)) = true;
 }

 /*
@@ -9241,7 +9234,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	cpumask_copy(cpus, cpu_active_mask);

-	per_cpu(dbs_boost_load_moved, this_cpu) = 0;
-
 	schedstat_inc(sd, lb_count[idle]);

 redo:
@@ -9436,20 +9428,6 @@ no_move:
 		}
 	} else {
 		sd->nr_balance_failed = 0;
-		if (per_cpu(dbs_boost_needed, this_cpu)) {
-			struct migration_notify_data mnd;
-
-			mnd.src_cpu = cpu_of(busiest);
-			mnd.dest_cpu = this_cpu;
-			mnd.load = per_cpu(dbs_boost_load_moved, this_cpu);
-			if (mnd.load > 100)
-				mnd.load = 100;
-			atomic_notifier_call_chain(&migration_notifier_head,
-						   0, (void *)&mnd);
-
-			per_cpu(dbs_boost_needed, this_cpu) = false;
-			per_cpu(dbs_boost_load_moved, this_cpu) = 0;
-		}

 		/* Assumes one 'busiest' cpu that we pulled tasks from */
 		if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
@@ -9681,8 +9659,6 @@ static int active_load_balance_cpu_stop(void *data)
 	raw_spin_lock_irq(&busiest_rq->lock);

-	per_cpu(dbs_boost_load_moved, target_cpu) = 0;
-
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
 		     !busiest_rq->active_balance))
@@ -9765,20 +9741,6 @@ out_unlock:
 		check_for_freq_change(target_rq, true, false);
 	}

-	if (per_cpu(dbs_boost_needed, target_cpu)) {
-		struct migration_notify_data mnd;
-
-		mnd.src_cpu = cpu_of(busiest_rq);
-		mnd.dest_cpu = target_cpu;
-		mnd.load = per_cpu(dbs_boost_load_moved, target_cpu);
-		if (mnd.load > 100)
-			mnd.load = 100;
-		atomic_notifier_call_chain(&migration_notifier_head,
-					   0, (void *)&mnd);
-
-		per_cpu(dbs_boost_needed, target_cpu) = false;
-		per_cpu(dbs_boost_load_moved, target_cpu) = 0;
-	}
 	return 0;
 }

kernel/sched/sched.h

@@ -254,7 +254,6 @@ struct cfs_bandwidth {

 struct task_group {
 	struct cgroup_subsys_state css;
-	bool notify_on_migrate;
 #ifdef CONFIG_SCHED_HMP
 	bool upmigrate_discouraged;
 #endif
@@ -1496,11 +1495,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 	return p->sched_task_group;
 }

-static inline bool task_notify_on_migrate(struct task_struct *p)
-{
-	return task_group(p)->notify_on_migrate;
-}
-
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
@@ -1526,10 +1520,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 {
 	return NULL;
 }

-static inline bool task_notify_on_migrate(struct task_struct *p)
-{
-	return false;
-}
 #endif /* CONFIG_CGROUP_SCHED */

 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)

kernel/sysctl.c

@@ -292,13 +292,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-	{
-		.procname	= "sched_wakeup_load_threshold",
-		.data		= &sysctl_sched_wakeup_load_threshold,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 #ifdef CONFIG_SCHED_FREQ_INPUT
 	{
 		.procname	= "sched_freq_inc_notify",