sched: Extend active balance to accept 'push_task' argument

Active balance currently picks one task to migrate from the busy CPU
to a chosen CPU (push_cpu). This patch extends active load balance to
recognize a particular task ('push_task') that needs to be migrated to
'push_cpu'. This capability will be leveraged by HMP-aware task
placement in a subsequent patch.

Change-Id: If31320111e6cc7044e617b5c3fd6d8e0c0e16952
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Author:    Srivatsa Vaddagiri <vatsa@codeaurora.org>
Date:      2014-03-31 10:34:41 -07:00
Committer: Todd Kjos
Parent:    3a353d6cea
Commit:    2da014c0d8
3 changed files with 34 additions and 10 deletions
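
For context, here is a minimal caller-side sketch of how the new field is
intended to be used. It is not part of this patch: the helper name
push_task_to_cpu() and the simplified locking are assumptions, and it
presumes the kernel/sched/fair.c context where active_load_balance_cpu_stop()
and the struct rq internals are visible. The protocol mirrors the handler
changes below: take a reference on the task, publish it as rq->push_task
together with push_cpu under the runqueue lock, then kick the stopper on the
source CPU, which detaches/attaches the task, clears rq->push_task and drops
the reference.

/*
 * Hypothetical caller-side sketch (helper name and locking context are
 * assumptions, not part of this patch): ask the stopper on @p's CPU to
 * push @p to @dst_cpu via the rq->push_task hook added below.
 */
static void push_task_to_cpu(struct task_struct *p, int dst_cpu)
{
	struct rq *rq = task_rq(p);	/* caller must keep p on this rq */
	unsigned long flags;
	bool kick = false;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (!rq->active_balance) {
		rq->active_balance = 1;
		rq->push_cpu = dst_cpu;
		get_task_struct(p);	/* released by active_load_balance_cpu_stop() */
		rq->push_task = p;
		kick = true;
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (kick)
		stop_one_cpu_nowait(cpu_of(rq), active_load_balance_cpu_stop,
				    rq, &rq->active_balance_work);
}

Note that the patch hoists lb_env to the top of active_load_balance_cpu_stop()
(with sd initialized to NULL) so the push_task path can call detach_task()
before any sched_domain has been selected; the normal path fills in env.sd
once a matching domain is found.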

@@ -7909,6 +7909,7 @@ void __init sched_init(void)
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
+		rq->push_task = NULL;
 		rq->cpu = i;
 		rq->online = 0;
 		rq->idle_stamp = 0;

@@ -9429,8 +9429,18 @@ static int active_load_balance_cpu_stop(void *data)
 	int busiest_cpu = cpu_of(busiest_rq);
 	int target_cpu = busiest_rq->push_cpu;
 	struct rq *target_rq = cpu_rq(target_cpu);
-	struct sched_domain *sd;
+	struct sched_domain *sd = NULL;
 	struct task_struct *p = NULL;
+	struct task_struct *push_task;
+	int push_task_detached = 0;
+	struct lb_env env = {
+		.sd		= sd,
+		.dst_cpu	= target_cpu,
+		.dst_rq		= target_rq,
+		.src_cpu	= busiest_rq->cpu,
+		.src_rq		= busiest_rq,
+		.idle		= CPU_IDLE,
+	};
 
 	raw_spin_lock_irq(&busiest_rq->lock);
@@ -9450,6 +9460,16 @@ static int active_load_balance_cpu_stop(void *data)
 	 */
 	BUG_ON(busiest_rq == target_rq);
 
+	push_task = busiest_rq->push_task;
+	if (push_task) {
+		if (task_on_rq_queued(push_task) &&
+		    task_cpu(push_task) == busiest_cpu) {
+			detach_task(push_task, &env);
+			push_task_detached = 1;
+		}
+		goto out_unlock;
+	}
+
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
 	for_each_domain(target_cpu, sd) {
@@ -9459,15 +9479,7 @@ static int active_load_balance_cpu_stop(void *data)
 	}
 
 	if (likely(sd)) {
-		struct lb_env env = {
-			.sd		= sd,
-			.dst_cpu	= target_cpu,
-			.dst_rq		= target_rq,
-			.src_cpu	= busiest_rq->cpu,
-			.src_rq		= busiest_rq,
-			.idle		= CPU_IDLE,
-		};
+		env.sd = sd;
 
 		schedstat_inc(sd, alb_count);
 		update_rq_clock(busiest_rq);
@@ -9485,8 +9497,18 @@ static int active_load_balance_cpu_stop(void *data)
 	rcu_read_unlock();
 out_unlock:
 	busiest_rq->active_balance = 0;
+	if (push_task)
+		busiest_rq->push_task = NULL;
 	raw_spin_unlock(&busiest_rq->lock);
 
+	if (push_task) {
+		if (push_task_detached)
+			attach_one_task(target_rq, push_task);
+		put_task_struct(push_task);
+	}
+
 	if (p)
 		attach_one_task(target_rq, p);

@@ -664,6 +664,7 @@ struct rq {
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
+	struct task_struct *push_task;
 	struct cpu_stop_work active_balance_work;
 	/* cpu of this runqueue: */
 	int cpu;