sched/fair: Code !is_big_little path into select_energy_cpu_brute()
This patch replaces the existing EAS upstream implementation of
select_energy_cpu_brute() with the one of find_best_target() used in
Android previously.

It also removes the cpumask 'and' from select_energy_cpu_brute(); see
the existing use of 'cpu = smp_processor_id()' in select_task_rq_fair().

Change-Id: If678c002efaa87d1ba3ec9989a4e9f8df98b83ec
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[ added guarding for non-schedtune builds ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
parent f6f9314893
commit 9e92e8a24f

1 changed file with 29 additions and 37 deletions
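Before the diff, here is a condensed view of the decision flow the patch introduces: find_best_target() proposes a capacity-fit candidate CPU, boosted or prefer-idle tasks take an idle candidate immediately, and otherwise a single energy_diff() call vets the migration. The sketch below is a standalone userspace mock, not kernel code; every *_mock() helper and its return value is invented purely so the control flow compiles and runs.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins: fixed answers so the flow can run in userspace. */
static int find_best_target_mock(bool boosted, bool prefer_idle)
{
	(void)boosted; (void)prefer_idle;
	return 2;			/* pretend CPU2 has enough capacity */
}

static bool idle_cpu_mock(int cpu)
{
	return cpu == 2;		/* pretend CPU2 is idle */
}

static bool cpu_overutilized_mock(int cpu)
{
	(void)cpu;
	return false;			/* pretend no CPU is overutilized */
}

static int energy_diff_mock(int src_cpu, int dst_cpu)
{
	(void)src_cpu; (void)dst_cpu;
	return -10;			/* pretend the move saves energy */
}

/* Mirrors the patched control flow: propose, maybe shortcut, then vet. */
static int select_energy_cpu_sketch(int prev_cpu, bool boosted,
				    bool prefer_idle)
{
	int target_cpu = prev_cpu;
	int tmp_target = find_best_target_mock(boosted, prefer_idle);

	if (tmp_target >= 0) {
		target_cpu = tmp_target;
		/* Boosted/prefer-idle tasks take an idle candidate as-is. */
		if ((boosted || prefer_idle) && idle_cpu_mock(target_cpu))
			return target_cpu;
	}

	if (target_cpu != prev_cpu) {
		/* An overutilized prev_cpu skips the energy veto entirely. */
		if (!cpu_overutilized_mock(prev_cpu) &&
		    energy_diff_mock(prev_cpu, target_cpu) >= 0)
			target_cpu = prev_cpu;	/* migration saves nothing */
	}

	return target_cpu;
}

int main(void)
{
	printf("target_cpu = %d\n", select_energy_cpu_sketch(0, false, false));
	return 0;
}

With these mock values the program prints target_cpu = 2: the candidate survives the veto because the mocked energy_diff() is negative, i.e. the move is predicted to save energy.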
kernel/sched/fair.c

@@ -5891,65 +5891,57 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
-	int i;
-	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
-	unsigned long max_spare = 0;
 	struct sched_domain *sd;
+	int target_cpu = prev_cpu, tmp_target;
+	bool boosted, prefer_idle;
 
 	if (sysctl_sched_sync_hint_enable && sync) {
 		int cpu = smp_processor_id();
-		cpumask_t search_cpus;
-		cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
-		if (cpumask_test_cpu(cpu, &search_cpus))
+
+		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			return cpu;
 	}
 
 	rcu_read_lock();
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	boosted = schedtune_task_boost(p) > 0;
+	prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+	boosted = get_sysctl_sched_cfs_boost() > 0;
+	prefer_idle = 0;
+#endif
 
 	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+	/* Find a cpu with sufficient capacity */
+	tmp_target = find_best_target(p, boosted, prefer_idle);
 
 	if (!sd)
 		goto unlock;
 
-	for_each_cpu_and(i, tsk_cpus_allowed(p), sched_domain_span(sd)) {
-		int diff;
-		unsigned long spare;
+	if (tmp_target >= 0) {
+		target_cpu = tmp_target;
+		if ((boosted || prefer_idle) && idle_cpu(target_cpu))
+			goto unlock;
+	}
 
+	if (target_cpu != prev_cpu) {
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
 			.src_cpu	= prev_cpu,
-			.dst_cpu	= i,
+			.dst_cpu	= target_cpu,
 			.task		= p,
 		};
 
-		spare = capacity_spare_wake(i, p);
-
-		if (i == prev_cpu)
-			continue;
-
-		if (spare > max_spare) {
-			max_spare = spare;
-			spare_cpu = i;
-		}
-
-		if (spare * 1024 < capacity_margin * task_util(p))
-			continue;
-
-		diff = energy_diff(&eenv);
-
-		if (diff < min_diff) {
-			min_diff = diff;
-			energy_cpu = i;
-		}
+		/* Not enough spare capacity on previous cpu */
+		if (cpu_overutilized(prev_cpu))
+			goto unlock;
+
+		if (energy_diff(&eenv) >= 0)
+			target_cpu = prev_cpu;
 	}
 
 unlock:
 	rcu_read_unlock();
 
-	if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
-		return prev_cpu;
-
-	return energy_cpu != prev_cpu ? energy_cpu : spare_cpu;
+	return target_cpu;
 }
 
 /*
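For reference when reading the hunk above, the energy_env block can be annotated as follows. The comments are our reading of the code, not wording from the patch; the sign convention of energy_diff() — a negative return means the move is expected to save energy — is inferred from the old 'diff < min_diff' comparison and the new '>= 0' veto.

	struct energy_env eenv = {
		.util_delta	= task_util(p),	/* utilization being moved */
		.src_cpu	= prev_cpu,	/* CPU the task would leave */
		.dst_cpu	= target_cpu,	/* candidate from find_best_target() */
		.task		= p,
	};

	if (energy_diff(&eenv) >= 0)		/* no predicted saving... */
		target_cpu = prev_cpu;		/* ...so stay on prev_cpu */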