sched: Unify load_balance{,_newidle}()
load_balance() and load_balance_newidle() look remarkably similar; one key
point where they differ is the condition on when to active balance. So split
that logic out into a separate function.

One side effect is that load_balance_newidle() previously used to fail and
return -1 under these conditions, whereas now it doesn't. I've not yet fully
figured out the whole -1 return case for either load_balance{,_newidle}().

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent baa8c1102f
commit 1af3ed3ddf
1 changed file with 59 additions and 56 deletions
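At its core, the patch replaces two slightly different inline "should we
active-balance?" conditions with one shared predicate, need_active_balance(),
which both load_balance() and load_balance_newidle() call. The following
standalone sketch illustrates the resulting call pattern; it is a
simplification of the diff below, with the kernel types stubbed out and the
power-savings special cases elided:

/* Minimal sketch only: kernel types and power-savings checks stubbed out. */
#include <stdio.h>

#define CPU_NOT_IDLE    0
#define CPU_NEWLY_IDLE  1

struct sched_domain {
        unsigned int nr_balance_failed;
        unsigned int cache_nice_tries;
};

/* Both balancing paths now ask this single question. */
static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
{
        if (idle == CPU_NEWLY_IDLE) {
                /* power-savings special cases elided; see the real diff below */
        }

        return sd->nr_balance_failed > sd->cache_nice_tries + 2;
}

int main(void)
{
        struct sched_domain sd = {
                .nr_balance_failed = 5,
                .cache_nice_tries  = 2,
        };

        /* as called from load_balance(), the periodic path */
        printf("periodic balance: %d\n", need_active_balance(&sd, 0, CPU_NOT_IDLE));

        /* as called from load_balance_newidle(), the newly-idle path */
        printf("newidle balance:  %d\n", need_active_balance(&sd, 0, CPU_NEWLY_IDLE));

        return 0;
}

The unification makes the failure-count threshold, sd->nr_balance_failed >
sd->cache_nice_tries + 2, the single common trigger; the newly-idle
power-savings checks sit in front of it inside the same function, which is
where load_balance_newidle()'s old -1 early returns went away.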
@@ -2816,6 +2816,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 /* Working cpumask for load_balance and load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
+static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
+{
+	if (idle == CPU_NEWLY_IDLE) {
+		/*
+		 * The only task running in a non-idle cpu can be moved to this
+		 * cpu in an attempt to completely freeup the other CPU
+		 * package.
+		 *
+		 * The package power saving logic comes from
+		 * find_busiest_group(). If there are no imbalance, then
+		 * f_b_g() will return NULL. However when sched_mc={1,2} then
+		 * f_b_g() will select a group from which a running task may be
+		 * pulled to this cpu in order to make the other package idle.
+		 * If there is no opportunity to make a package idle and if
+		 * there are no imbalance, then f_b_g() will return NULL and no
+		 * action will be taken in load_balance_newidle().
+		 *
+		 * Under normal task pull operation due to imbalance, there
+		 * will be more than one task in the source run queue and
+		 * move_tasks() will succeed. ld_moved will be true and this
+		 * active balance code will not be triggered.
+		 */
+		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+			return 0;
+
+		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
+			return 0;
+	}
+
+	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+}
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2902,8 +2935,7 @@ redo:
 		schedstat_inc(sd, lb_failed[idle]);
 		sd->nr_balance_failed++;
 
-		if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
-
+		if (need_active_balance(sd, sd_idle, idle)) {
 			raw_spin_lock_irqsave(&busiest->lock, flags);
 
 			/* don't kick the migration_thread, if the curr
@@ -3049,66 +3081,37 @@ redo:
 		int active_balance = 0;
 
 		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
-		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-			return -1;
-
-		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
-			return -1;
-
-		if (sd->nr_balance_failed++ < 2)
-			return -1;
+		sd->nr_balance_failed++;
 
-		/*
-		 * The only task running in a non-idle cpu can be moved to this
-		 * cpu in an attempt to completely freeup the other CPU
-		 * package. The same method used to move task in load_balance()
-		 * have been extended for load_balance_newidle() to speedup
-		 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
-		 *
-		 * The package power saving logic comes from
-		 * find_busiest_group(). If there are no imbalance, then
-		 * f_b_g() will return NULL. However when sched_mc={1,2} then
-		 * f_b_g() will select a group from which a running task may be
-		 * pulled to this cpu in order to make the other package idle.
-		 * If there is no opportunity to make a package idle and if
-		 * there are no imbalance, then f_b_g() will return NULL and no
-		 * action will be taken in load_balance_newidle().
-		 *
-		 * Under normal task pull operation due to imbalance, there
-		 * will be more than one task in the source run queue and
-		 * move_tasks() will succeed. ld_moved will be true and this
-		 * active balance code will not be triggered.
-		 */
-
-		/* Lock busiest in correct order while this_rq is held */
-		double_lock_balance(this_rq, busiest);
+		if (need_active_balance(sd, sd_idle, CPU_NEWLY_IDLE)) {
+			double_lock_balance(this_rq, busiest);
 
-		/*
-		 * don't kick the migration_thread, if the curr
-		 * task on busiest cpu can't be moved to this_cpu
-		 */
-		if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
-			double_unlock_balance(this_rq, busiest);
-			all_pinned = 1;
-			return ld_moved;
-		}
+			/*
+			 * don't kick the migration_thread, if the curr
+			 * task on busiest cpu can't be moved to this_cpu
+			 */
+			if (!cpumask_test_cpu(this_cpu,
+					      &busiest->curr->cpus_allowed)) {
+				double_unlock_balance(this_rq, busiest);
+				all_pinned = 1;
+				return ld_moved;
+			}
 
-		if (!busiest->active_balance) {
-			busiest->active_balance = 1;
-			busiest->push_cpu = this_cpu;
-			active_balance = 1;
-		}
+			if (!busiest->active_balance) {
+				busiest->active_balance = 1;
+				busiest->push_cpu = this_cpu;
+				active_balance = 1;
+			}
 
-		double_unlock_balance(this_rq, busiest);
-		/*
-		 * Should not call ttwu while holding a rq->lock
-		 */
-		raw_spin_unlock(&this_rq->lock);
-		if (active_balance)
-			wake_up_process(busiest->migration_thread);
-		raw_spin_lock(&this_rq->lock);
+			double_unlock_balance(this_rq, busiest);
+			/*
+			 * Should not call ttwu while holding a rq->lock
+			 */
+			raw_spin_unlock(&this_rq->lock);
+			if (active_balance)
+				wake_up_process(busiest->migration_thread);
+			raw_spin_lock(&this_rq->lock);
+		}
 	} else
 		sd->nr_balance_failed = 0;