sched: Only do EA migration when CPU throttling is imminent
We do not want to migrate tasks unnecessarily, to avoid cache-related and other migration latencies that could affect the performance of the system. Add a check to only try EA migration when CPU frequency throttling is imminent.

CRs-Fixed: 764788
Change-Id: I92e86e62da10ce15f1e76a980df3545e93d76348
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
[rameezmustafa@codeaurora.org]: Port to msm-3.18
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
parent 29a412dffa
commit 72fa561b0d
1 changed file with 20 additions and 1 deletion
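The change boils down to a three-part gate: power-aware scheduling must be enabled, frequency throttling on the task's current CPU must be imminent, and a lower-power CPU must actually be available. The following standalone C sketch illustrates that ordering with hypothetical mock data; the struct, sample power costs and helper bodies below are stand-ins, not the kernel's rq, task_struct or get_cpu_pwr_stats() API.

/*
 * Illustrative userspace mock of the EA-migration gate, not kernel code.
 * All types and numbers here are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

struct mock_cpu_stats {
	int power_cost;   /* relative power cost of running on this CPU */
	int throttling;   /* non-zero when frequency throttling is imminent */
};

static struct mock_cpu_stats cpu_stats[] = {
	{ .power_cost = 100, .throttling = 1 },  /* CPU 0: hot, about to throttle */
	{ .power_cost = 40,  .throttling = 0 },  /* CPU 1: cheaper and cool */
};

static const int nr_cpus = 2;
static const bool sched_enable_power_aware = true;

/* Mirrors is_cpu_throttling_imminent(): default to "not throttling". */
static bool throttling_imminent(int cpu)
{
	return cpu_stats[cpu].throttling != 0;
}

/* Mirrors lower_power_cpu_available(): is there a cheaper CPU than @cpu? */
static bool lower_power_cpu_available(int cpu)
{
	for (int i = 0; i < nr_cpus; i++)
		if (cpu_stats[i].power_cost < cpu_stats[cpu].power_cost)
			return true;
	return false;
}

/* The gate added by this patch: only consider EA migration under throttling. */
static bool should_try_ea_migration(int cpu)
{
	return sched_enable_power_aware &&
	       throttling_imminent(cpu) &&
	       lower_power_cpu_available(cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		printf("CPU %d: EA migration %s\n", cpu,
		       should_try_ea_migration(cpu) ? "considered" : "skipped");
	return 0;
}

Keeping the cheap throttling check ahead of the candidate-CPU search means the common, unthrottled case short-circuits early and no scan for a lower-power CPU is performed, which matches the commit's goal of avoiding unnecessary migrations.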
@@ -3623,6 +3623,8 @@ static int lower_power_cpu_available(struct task_struct *p, int cpu)
 	return (lowest_power_cpu != task_cpu(p));
 }
 
+static inline int is_cpu_throttling_imminent(int cpu);
+
 /*
  * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
  * cpu as per its demand or priority)
@@ -3655,6 +3657,7 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
 		return MOVE_TO_BIG_CPU;
 
 	if (sched_enable_power_aware &&
+	    is_cpu_throttling_imminent(cpu_of(rq)) &&
 	    lower_power_cpu_available(p, cpu_of(rq)))
 		return MOVE_TO_POWER_EFFICIENT_CPU;
 
@@ -3718,6 +3721,16 @@ static inline int nr_big_tasks(struct rq *rq)
 	return rq->nr_big_tasks;
 }
 
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	int throttling = 0;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+
+	if (per_cpu_info)
+		throttling = per_cpu_info[cpu].throttling;
+	return throttling;
+}
+
 #else	/* CONFIG_SCHED_HMP */
 
 #define sched_enable_power_aware 0
@@ -3774,6 +3787,11 @@ static inline int nr_big_tasks(struct rq *rq)
 	return 0;
 }
 
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	return 0;
+}
+
 #endif	/* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_HMP
@@ -7745,7 +7763,8 @@ group_type group_classify(struct sched_group *group,
 	if ((capacity(env->dst_rq) == group_rq_capacity(group)) &&
 	    sgs->sum_nr_running && (env->idle != CPU_NOT_IDLE) &&
 	    power_cost_at_freq(env->dst_cpu, 0) <
-	    power_cost_at_freq(cpumask_first(sched_group_cpus(group)), 0)) {
+	    power_cost_at_freq(cpumask_first(sched_group_cpus(group)), 0) &&
+	    is_cpu_throttling_imminent(cpumask_first(sched_group_cpus(group)))) {
 		env->flags |= LBF_PWR_ACTIVE_BALANCE;
 		return group_ea;
 	}
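The last hunk applies the same gate on the load-balance side: group_classify() now reports group_ea and sets LBF_PWR_ACTIVE_BALANCE only when, in addition to the existing capacity, occupancy, idleness and power_cost_at_freq() checks, the group's first CPU is about to throttle. A minimal sketch of that compound predicate follows, using hypothetical mock structures and numbers in place of the scheduler's env, sched_group and power-cost helpers.

/*
 * Illustrative sketch of the extended group_classify() condition, not
 * kernel code. Capacity and power-cost figures are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

struct mock_group {
	int capacity;               /* group capacity */
	int nr_running;             /* runnable tasks in the group */
	int first_cpu_cost;         /* power cost of the group's first CPU */
	bool first_cpu_throttling;  /* is that CPU about to throttle? */
};

struct mock_dst {
	int capacity;  /* destination runqueue capacity */
	bool idle;     /* destination CPU is idle or newly idle */
	int cost;      /* power cost of the destination CPU */
};

/*
 * Before this patch the "energy aware" classification only required the
 * destination CPU to be cheaper; now the remote CPU must also be about
 * to throttle before an active power balance is triggered.
 */
static bool classify_as_ea(const struct mock_dst *dst,
			   const struct mock_group *grp)
{
	return dst->capacity == grp->capacity &&
	       grp->nr_running > 0 &&
	       dst->idle &&
	       dst->cost < grp->first_cpu_cost &&
	       grp->first_cpu_throttling;
}

int main(void)
{
	struct mock_dst dst = { .capacity = 1024, .idle = true, .cost = 40 };
	struct mock_group cool = { 1024, 2, 100, false };
	struct mock_group hot  = { 1024, 2, 100, true };

	printf("cool group: %s\n", classify_as_ea(&dst, &cool) ? "group_ea" : "not ea");
	printf("hot group:  %s\n", classify_as_ea(&dst, &hot)  ? "group_ea" : "not ea");
	return 0;
}

With the extra term, a cheaper idle CPU no longer pulls work from a busier group merely because it is cheaper; the pull is only considered once the remote CPU is close to being throttled.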