cpufreq: interactive: Use new task load from scheduler

Account for the amount of load contributed by new tasks within the
CPU load so that the governor can apply a different policy when the
CPU is loaded by new tasks.

To make new task load distinguishable, a new tunable
sched_new_task_windows is also introduced.  The tunable defines tasks
as new when they have been active for fewer than the configured
number of windows.

Change-Id: I2e2e62e4103882f7362154b792ab978b181b9f59
Suggested-by: Saravana Kannan <skannan@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
[junjiew@codeaurora.org: Dropped all changes on scheduler side because
those have been merged separately.]
Signed-off-by: Junjie Wu <junjiew@codeaurora.org>
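
For context, a minimal sketch of the classification the tunable
implies.  The scheduler-side changes were merged separately and are
not in this diff, so the per-task counter (ravg.active_windows) and
the sysctl symbol below are assumed names, not the merged code:

/*
 * Minimal sketch, not the merged scheduler code: a task counts as
 * "new" while it has been active in fewer windows than the
 * sched_new_task_windows tunable allows.  ravg.active_windows and
 * sysctl_sched_new_task_windows are assumed names.
 */
static inline bool is_new_task(struct task_struct *p)
{
	return p->ravg.active_windows < sysctl_sched_new_task_windows;
}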
commit ce83f7661b
parent 512bf81410
Author: Joonwoo Park <joonwoop@codeaurora.org>
Date:   2015-09-15 09:35:53 -07:00
Committed by: David Keitel


@@ -52,7 +52,7 @@ struct cpufreq_interactive_policyinfo {
 	bool reject_notification;
 	int governor_enabled;
 	struct cpufreq_interactive_tunables *cached_tunables;
-	unsigned long *cpu_busy_times;
+	struct sched_load *sl;
 };
 
 /* Protected by per-policy load_lock */
@@ -469,6 +469,7 @@ static void __cpufreq_interactive_timer(unsigned long data, bool is_notif)
 	unsigned long flags;
 	unsigned long max_cpu;
 	int i, fcpu;
+	struct sched_load *sl;
 	struct cpufreq_govinfo govinfo;
 	bool skip_hispeed_logic, skip_min_sample_time;
 	bool policy_max_fast_restore = false;
@@ -484,14 +485,14 @@ static void __cpufreq_interactive_timer(unsigned long data, bool is_notif)
 	ppol->last_evaluated_jiffy = get_jiffies_64();
 
 	if (tunables->use_sched_load)
-		sched_get_cpus_busy(ppol->cpu_busy_times,
-				    ppol->policy->related_cpus);
+		sched_get_cpus_busy(ppol->sl, ppol->policy->related_cpus);
 	max_cpu = cpumask_first(ppol->policy->cpus);
 	for_each_cpu(i, ppol->policy->cpus) {
 		pcpu = &per_cpu(cpuinfo, i);
+		sl = &ppol->sl[i - fcpu];
 		if (tunables->use_sched_load) {
-			cputime_speedadj = (u64)ppol->cpu_busy_times[i - fcpu]
-						* ppol->policy->cpuinfo.max_freq;
+			cputime_speedadj = (u64)sl->prev_load *
+					   ppol->policy->cpuinfo.max_freq;
 			do_div(cputime_speedadj, tunables->timer_rate);
 		} else {
 			now = update_load(i);
@@ -1525,7 +1526,7 @@ static struct cpufreq_interactive_policyinfo *get_policyinfo(
 	struct cpufreq_interactive_policyinfo *ppol =
 				per_cpu(polinfo, policy->cpu);
 	int i;
-	unsigned long *busy;
+	struct sched_load *sl;
 
 	/* polinfo already allocated for policy, return */
 	if (ppol)
@@ -1535,13 +1536,13 @@ static struct cpufreq_interactive_policyinfo *get_policyinfo(
 	if (!ppol)
 		return ERR_PTR(-ENOMEM);
 
-	busy = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*busy),
-		       GFP_KERNEL);
-	if (!busy) {
+	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
+		     GFP_KERNEL);
+	if (!sl) {
 		kfree(ppol);
 		return ERR_PTR(-ENOMEM);
 	}
-	ppol->cpu_busy_times = busy;
+	ppol->sl = sl;
 
 	init_timer_deferrable(&ppol->policy_timer);
 	ppol->policy_timer.function = cpufreq_interactive_timer;
@@ -1569,7 +1570,7 @@ static void free_policyinfo(int cpu)
 		if (per_cpu(polinfo, j) == ppol)
 			per_cpu(polinfo, cpu) = NULL;
 	kfree(ppol->cached_tunables);
-	kfree(ppol->cpu_busy_times);
+	kfree(ppol->sl);
 	kfree(ppol);
 }
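
For reference, a sketch of the per-CPU snapshot the hunks above
consume.  Only prev_load appears in this diff; the new_task_load
field and its exact accounting come from the separately merged
scheduler patches, so its name and semantics here are assumptions:

/*
 * Illustrative layout only.  sl->prev_load is what the timer path
 * above feeds into cputime_speedadj; new_task_load is assumed to be
 * the share of the previous window contributed by tasks the
 * scheduler still considers new.
 */
struct sched_load {
	unsigned long prev_load;	/* busy time of the last window */
	unsigned long new_task_load;	/* portion from "new" tasks */
};

/*
 * Example consumer: percentage of the window's load that came from
 * new tasks, which a policy could compare against a threshold
 * (assumes new_task_load is accounted within prev_load).
 */
static unsigned int new_task_load_pct(struct sched_load *sl)
{
	if (!sl->prev_load)
		return 0;
	return (unsigned int)div64_u64((u64)sl->new_task_load * 100,
				       sl->prev_load);
}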