sched: Fix herding issue

check_for_migration() can run concurrently on multiple cpus, which may
result in several tasks picking the same cpu as their migration
target. That herding can leave other cpus underutilized and increase
scheduling latencies for tasks. Fix this by serializing the
select_best_cpu() calls made from check_for_migration() and by marking
each selected cpu as reserved, so that subsequent select_best_cpu()
calls from check_for_migration() skip reserved cpus. The reservation
is cleared once the pushed task has been moved or the push request is
cancelled.

Change-Id: I73a22cacab32dee3c14267a98b700f572aa3900c
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Srivatsa Vaddagiri authored 2014-07-25 08:04:27 -07:00; committed by David Keitel
commit 8e526b1ab4, parent c820f1c5f2
3 changed files with 65 additions and 16 deletions
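
For context before the diff: the sketch below is a minimal, standalone
illustration of the serialize-and-reserve pattern this patch adopts. It is
not kernel code; pick_best_cpu(), cpu_reserved[], cpu_load[] and NR_CPUS are
illustrative stand-ins for select_best_cpu(), the new CPU_RESERVED rq flag,
per-cpu load, and the real cpu count.

#include <stdio.h>
#include <pthread.h>

#define NR_CPUS 4

static pthread_mutex_t migration_lock = PTHREAD_MUTEX_INITIALIZER;
static int cpu_reserved[NR_CPUS];          /* models the CPU_RESERVED rq flag */
static int cpu_load[NR_CPUS] = { 3, 0, 0, 2 };

/* Stand-in for select_best_cpu(): least-loaded cpu that is not reserved. */
static int pick_best_cpu(void)
{
	int cpu, best = -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_reserved[cpu])     /* the new skip_cpu() check */
			continue;
		if (best < 0 || cpu_load[cpu] < cpu_load[best])
			best = cpu;
	}
	return best;
}

/*
 * Models check_for_migration(): serialize target selection so concurrent
 * callers cannot all choose the same idle cpu, and reserve the chosen cpu
 * until the migration completes (where clear_reserved() would run).
 */
static void *migrate_one_task(void *arg)
{
	int target;

	pthread_mutex_lock(&migration_lock);
	target = pick_best_cpu();
	if (target >= 0)
		cpu_reserved[target] = 1;  /* mark_reserved() */
	pthread_mutex_unlock(&migration_lock);

	printf("task %ld -> cpu %d\n", (long)arg, target);
	return NULL;
}

int main(void)
{
	pthread_t th[2];
	long i;

	/* Two concurrent "check_for_migration" callers pick distinct cpus. */
	for (i = 0; i < 2; i++)
		pthread_create(&th[i], NULL, migrate_one_task, (void *)i);
	for (i = 0; i < 2; i++)
		pthread_join(th[i], NULL);
	return 0;
}

Build with cc -pthread. Without migration_lock and the reserved flags, both
threads would observe the same loads and pick cpu 1 simultaneously, which is
exactly the herding described above.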

@@ -1133,9 +1133,11 @@ static inline void clear_hmp_request(int cpu)
 	unsigned long flags;
 
 	clear_boost_kick(cpu);
+	clear_reserved(cpu);
 	if (rq->push_task) {
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->push_task) {
+			clear_reserved(rq->push_cpu);
 			put_task_struct(rq->push_task);
 			rq->push_task = NULL;
 		}

@@ -3040,6 +3040,9 @@ static int skip_cpu(struct task_struct *p, int cpu, int reason)
 	if (!reason)
 		return 0;
 
+	if (is_reserved(cpu))
+		return 1;
+
 	switch (reason) {
 	case MOVE_TO_BIG_CPU:
 		skip = (rq->capacity <= task_rq->capacity);
@@ -3396,23 +3399,13 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 	return 0;
 }
 
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
 {
-	int cpu = cpu_of(rq), new_cpu = cpu;
 	unsigned long flags;
-	int active_balance = 0, rc;
-
-	rc = migration_needed(rq, p);
-	if (rc)
-		new_cpu = select_best_cpu(p, cpu, rc);
-
-	if (new_cpu == cpu)
-		return;
+	int rc = 0;
 
 	/* Invoke active balance to force migrate currently running task */
 	raw_spin_lock_irqsave(&rq->lock, flags);
@@ -3421,10 +3414,38 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 		rq->push_cpu = new_cpu;
 		get_task_struct(p);
 		rq->push_task = p;
-		active_balance = 1;
+		rc = 1;
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
+	return rc;
+}
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int cpu = cpu_of(rq), new_cpu;
+	int active_balance = 0, reason;
+
+	reason = migration_needed(rq, p);
+	if (!reason)
+		return;
+
+	raw_spin_lock(&migration_lock);
+	new_cpu = select_best_cpu(p, cpu, reason);
+
+	if (new_cpu != cpu) {
+		active_balance = kick_active_balance(rq, p, new_cpu);
+		if (active_balance)
+			mark_reserved(new_cpu);
+	}
+
+	raw_spin_unlock(&migration_lock);
+
 	if (active_balance)
 		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
 				    &rq->active_balance_work);
@@ -8702,6 +8723,7 @@ out_unlock:
 	if (push_task_detached)
 		attach_one_task(target_rq, push_task);
 	put_task_struct(push_task);
+	clear_reserved(target_cpu);
 }
 
 	if (p)

@@ -1018,6 +1018,29 @@ static inline unsigned long capacity_scale_cpu_freq(int cpu)
 #ifdef CONFIG_SCHED_HMP
 
 #define BOOST_KICK	0
 #define CPU_RESERVED	1
+
+static inline int is_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	/* Name boost_flags as hmp_flags? */
+	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
 extern unsigned int sched_enable_hmp;
 extern unsigned int sched_enable_power_aware;
@@ -1051,6 +1074,8 @@ static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
 {
 }
 
+static inline void clear_reserved(int cpu) { }
+
 #define power_cost_at_freq(...) 0
 #define trace_sched_cpu_load(...)