Merge "sched: Restore previous implementation of check_for_migration()"

commit fcf826264c
Linux Build Service Account, 2018-01-09 15:40:40 -08:00
Committed by: Gerrit - the friendly Code Review server
2 changed files with 64 additions and 43 deletions

kernel/sched/fair.c

@@ -3657,6 +3657,68 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 	return 0;
 }
 
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static bool do_migration(int reason, int new_cpu, int cpu)
+{
+	if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
+				&& same_cluster(new_cpu, cpu))
+		return false;
+
+	/* Inter cluster high irqload migrations are OK */
+	return new_cpu != cpu;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int cpu = cpu_of(rq), new_cpu;
+	int active_balance = 0, reason;
+
+	reason = migration_needed(p, cpu);
+	if (!reason)
+		return;
+
+	raw_spin_lock(&migration_lock);
+	new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+	if (do_migration(reason, new_cpu, cpu)) {
+		active_balance = kick_active_balance(rq, p, new_cpu);
+		if (active_balance)
+			mark_reserved(new_cpu);
+	}
+
+	raw_spin_unlock(&migration_lock);
+
+	if (active_balance)
+		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+				    &rq->active_balance_work);
+}
+
 #ifdef CONFIG_CFS_BANDWIDTH
 static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
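
The restored path above is tick-driven: migration_needed() classifies the running task (up-migration, down-migration, or high irqload), select_best_cpu() picks a target under migration_lock, and kick_active_balance() arms rq->push_task before the stopper is queued. A minimal sketch of the usual call site, assuming the scheduler_tick() hook used in the msm trees; names outside this diff are assumptions:

	/*
	 * Sketch only: how check_for_migration() is typically invoked.
	 * The scheduler_tick() call site is an assumption based on the
	 * msm kernel trees, not something shown in this diff.
	 */
	void scheduler_tick(void)
	{
		int cpu = smp_processor_id();
		struct rq *rq = cpu_rq(cpu);

		/* ... regular tick work: update_rq_clock(), task_tick(), ... */

		/*
		 * If the running task would be better placed elsewhere, this
		 * arms rq->push_task/push_cpu and queues
		 * active_load_balance_cpu_stop() via stop_one_cpu_nowait().
		 */
		check_for_migration(rq, rq->curr);
	}
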
@@ -11436,47 +11498,6 @@ static void rq_offline_fair(struct rq *rq)
 	unthrottle_offline_cfs_rqs(rq);
 }
 
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
-	int rc = 0;
-
-	/* Invoke active balance to force migrate currently running task */
-	raw_spin_lock(&rq->lock);
-	if (!rq->active_balance) {
-		rq->active_balance = 1;
-		rq->push_cpu = new_cpu;
-		get_task_struct(p);
-		rq->push_task = p;
-		rc = 1;
-	}
-	raw_spin_unlock(&rq->lock);
-
-	return rc;
-}
-
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
-	int new_cpu;
-	int active_balance;
-	int cpu = task_cpu(p);
-
-	if (rq->misfit_task) {
-		if (rq->curr->state != TASK_RUNNING ||
-		    rq->curr->nr_cpus_allowed == 1)
-			return;
-
-		new_cpu = select_energy_cpu_brute(p, cpu, 0);
-		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
-			active_balance = kick_active_balance(rq, p, new_cpu);
-			if (active_balance)
-				stop_one_cpu_nowait(cpu,
-					active_load_balance_cpu_stop,
-					rq, &rq->active_balance_work);
-		}
-	}
-}
-
 #endif /* CONFIG_SMP */
 
 /*
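
Note that both versions of kick_active_balance() take a task reference (get_task_struct()) before publishing the task in rq->push_task; the reference must be dropped by whoever consumes the push. A simplified sketch of that consumer side, assuming the cleanup sits in active_load_balance_cpu_stop() as in the msm trees (the real function also performs the migration itself; only the reference lifecycle is shown):

	/* Sketch of the rq->push_task reference lifecycle only. */
	static int active_load_balance_cpu_stop(void *data)
	{
		struct rq *busiest_rq = data;
		struct task_struct *push_task = busiest_rq->push_task;

		/* ... attempt to move push_task to busiest_rq->push_cpu ... */

		if (push_task) {
			busiest_rq->push_task = NULL;
			/* Drop the reference taken in kick_active_balance(). */
			put_task_struct(push_task);
		}
		busiest_rq->active_balance = 0;
		return 0;
	}
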

kernel/sched/sched.h

@@ -32,10 +32,8 @@ extern long calc_load_fold_active(struct rq *this_rq);
 #ifdef CONFIG_SMP
 extern void update_cpu_load_active(struct rq *this_rq);
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
 #else
 static inline void update_cpu_load_active(struct rq *this_rq) { }
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 #endif
 
 /*
@@ -1449,6 +1447,7 @@ static inline bool is_short_burst_task(struct task_struct *p)
 		p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
 }
 
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
 extern void pre_big_task_count_change(const struct cpumask *cpus);
 extern void post_big_task_count_change(const struct cpumask *cpus);
 extern void set_hmp_defaults(void);
@@ -1708,6 +1707,7 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
 	return 1;
 }
 
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 static inline void pre_big_task_count_change(void) { }
 static inline void post_big_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }
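
Net effect in sched.h: check_for_migration() is no longer declared in the generic CONFIG_SMP block; it moves next to the other HMP externs, with an empty inline stub when the feature is compiled out. The surrounding guard is not visible in these hunks; assuming the usual CONFIG_SCHED_HMP pattern, callers compile unchanged either way:

	/* Illustrative pattern; the CONFIG_SCHED_HMP guard name is an
	 * assumption, as the #ifdef itself is outside these hunks. */
	#ifdef CONFIG_SCHED_HMP
	extern void check_for_migration(struct rq *rq, struct task_struct *p);
	#else
	static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
	#endif
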