sched: Move notify_migration() under CONFIG_SCHED_HMP

notify_migration() is an HMP-specific function that relies on all
of the functions it calls being stubbed out for !CONFIG_SCHED_HMP.
However, it still makes calls to rcu_read_lock()/rcu_read_unlock(),
which are pure overhead in the !CONFIG_SCHED_HMP case. Move the
function under CONFIG_SCHED_HMP and add an empty stub for when the
config is not defined so that there is no overhead.
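
For context, the shape of the resulting header arrangement is sketched
below. This is a minimal illustration only: it assumes the declaration
and the stub sit in the scheduler header's existing CONFIG_SCHED_HMP
and !CONFIG_SCHED_HMP sections (the hunks below do not show the
surrounding #ifdef lines). Callers always see a notify_migration()
symbol, but with HMP disabled it is an empty static inline that the
compiler drops entirely, rcu_read_lock()/rcu_read_unlock() included.

/*
 * Sketch only: the #ifdef split mirrors the header's existing layout;
 * the actual guard lines are not part of the diff below.
 */
#ifdef CONFIG_SCHED_HMP
/* Real implementation lives with the rest of the HMP code. */
extern void notify_migration(int src_cpu, int dest_cpu,
			     bool src_cpu_dead, struct task_struct *p);
#else
/* Empty stub: call sites compile away when HMP is disabled. */
static inline void notify_migration(int src_cpu, int dest_cpu,
				    bool src_cpu_dead, struct task_struct *p) { }
#endif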

Change-Id: Iad914f31b629e81e403b0e89796b2b0f1d081695
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
commit 67e0df6e33
parent 9095a09ab1
Author: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Date:   2016-08-02 15:08:13 -07:00

3 changed files with 25 additions and 19 deletions

@@ -1134,25 +1134,6 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 	return rq;
 }
 
-static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
-			     struct task_struct *p)
-{
-	bool check_groups;
-
-	rcu_read_lock();
-	check_groups = task_in_related_thread_group(p);
-	rcu_read_unlock();
-
-	if (!same_freq_domain(src_cpu, dest_cpu)) {
-		if (!src_cpu_dead)
-			check_for_freq_change(cpu_rq(src_cpu), false,
-					      check_groups);
-		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
-	} else {
-		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
-	}
-}
-
 /*
  * migration_cpu_stop - this will be executed by a highprio stopper thread
  * and performs thread migration by bumping thread off CPU then

@@ -1786,6 +1786,25 @@ void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
 			(void *)(long)cpu);
 }
 
+void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
+		      struct task_struct *p)
+{
+	bool check_groups;
+
+	rcu_read_lock();
+	check_groups = task_in_related_thread_group(p);
+	rcu_read_unlock();
+
+	if (!same_freq_domain(src_cpu, dest_cpu)) {
+		if (!src_cpu_dead)
+			check_for_freq_change(cpu_rq(src_cpu), false,
+					      check_groups);
+		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
+	} else {
+		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
+	}
+}
+
 static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
 				     u64 irqtime, int event)
 {

@@ -1300,6 +1300,9 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
 extern void
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
 
+extern void notify_migration(int src_cpu, int dest_cpu,
+			     bool src_cpu_dead, struct task_struct *p);
+
 struct group_cpu_time {
 	u64 curr_runnable_sum;
 	u64 prev_runnable_sum;
@@ -1580,6 +1583,9 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 static inline void
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
 
+static inline void notify_migration(int src_cpu, int dest_cpu,
+				    bool src_cpu_dead, struct task_struct *p) { }
+
 static inline int same_freq_domain(int src_cpu, int dst_cpu)
 {
 	return 1;