/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_policyinfo {
        struct timer_list policy_timer;
        struct timer_list policy_slack_timer;
        struct hrtimer notif_timer;
        spinlock_t load_lock; /* protects load tracking stat */
        u64 last_evaluated_jiffy;
        struct cpufreq_policy *policy;
        struct cpufreq_policy p_nolim; /* policy copy with no limits */
        struct cpufreq_frequency_table *freq_table;
        spinlock_t target_freq_lock; /* protects target freq */
        unsigned int target_freq;
        unsigned int floor_freq;
        unsigned int min_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        u64 max_freq_hyst_start_time;
        struct rw_semaphore enable_sem;
        bool reject_notification;
        bool notif_pending;
        unsigned long notif_cpu;
        int governor_enabled;
        struct cpufreq_interactive_tunables *cached_tunables;
        struct sched_load *sl;
};

/* Protected by per-policy load_lock */
struct cpufreq_interactive_cpuinfo {
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        unsigned int loadadjfreq;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load burst (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load. Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can
         * ramp down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /* The sample rate of the timer used to increase frequency */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        bool boosted;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;

        /* scheduler input related flags */
        bool use_sched_load;
        bool use_migration_notif;

        /*
         * Whether to align timer windows across all CPUs. When
         * use_sched_load is true, this flag is ignored and windows
         * will always be aligned.
         */
        bool align_windows;

        /*
         * Stay at max freq for at least max_freq_hysteresis before dropping
         * frequency.
         */
        unsigned int max_freq_hysteresis;

        /* Ignore hispeed_freq and above_hispeed_delay for notification */
        bool ignore_hispeed_on_notif;

        /* Ignore min_sample_time for notification */
        bool fast_ramp_down;

        /* Whether to enable prediction or not */
        bool enable_prediction;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
static struct cpufreq_interactive_tunables *cached_common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
                             struct cpufreq_interactive_tunables *tunables)
{
        unsigned long step = usecs_to_jiffies(tunables->timer_rate);
        u64 ret;

        if (tunables->use_sched_load || tunables->align_windows) {
                do_div(jif, step);
                ret = (jif + 1) * step;
        } else {
                ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
        }

        return ret;
}
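
/*
 * Illustrative example (values assumed, not taken from the driver): with
 * HZ=100 and timer_rate=20000us, step = usecs_to_jiffies(20000) = 2 jiffies.
 * For an aligned window, jif=105 is divided down to 52 and the next window
 * start is (52 + 1) * 2 = 106, so every CPU using aligned windows wakes on
 * the same jiffy boundary. Without alignment, the timer simply fires
 * timer_rate microseconds from now.
 */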

static inline int set_window_helper(
                        struct cpufreq_interactive_tunables *tunables)
{
        return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
                         usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
                                              bool slack_only)
{
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables =
                ppol->policy->governor_data;
        u64 expires;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ppol->load_lock, flags);
        expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
        if (!slack_only) {
                for_each_cpu(i, ppol->policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, i);
                        pcpu->time_in_idle = get_cpu_idle_time(i,
                                                &pcpu->time_in_idle_timestamp,
                                                tunables->io_is_busy);
                        pcpu->cputime_speedadj = 0;
                        pcpu->cputime_speedadj_timestamp =
                                                pcpu->time_in_idle_timestamp;
                }
                del_timer(&ppol->policy_timer);
                ppol->policy_timer.expires = expires;
                add_timer(&ppol->policy_timer);
        }

        if (tunables->timer_slack_val >= 0 &&
            ppol->target_freq > ppol->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                del_timer(&ppol->policy_slack_timer);
                ppol->policy_slack_timer.expires = expires;
                add_timer(&ppol->policy_slack_timer);
        }

        spin_unlock_irqrestore(&ppol->load_lock, flags);
}

/*
 * The caller shall take enable_sem write semaphore to avoid any timer race.
 * The policy_timer and policy_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
        struct cpufreq_interactive_cpuinfo *pcpu;
        u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ppol->load_lock, flags);
        ppol->policy_timer.expires = expires;
        add_timer(&ppol->policy_timer);
        if (tunables->timer_slack_val >= 0 &&
            ppol->target_freq > ppol->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                ppol->policy_slack_timer.expires = expires;
                add_timer(&ppol->policy_slack_timer);
        }

        for_each_cpu(i, ppol->policy->cpus) {
                pcpu = &per_cpu(cpuinfo, i);
                pcpu->time_in_idle =
                        get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
                                          tunables->io_is_busy);
                pcpu->cputime_speedadj = 0;
                pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        }
        spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                    freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}
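
/*
 * Layout note (example values assumed for illustration): target_loads and
 * above_hispeed_delay are flat arrays of alternating values and frequency
 * breakpoints, {val0, freq1, val1, freq2, val2, ...}, sorted by frequency.
 * For target_loads = {85, 1500000, 90}, the lookups above return 85 for any
 * freq below 1.5 GHz and 90 at or above it; the loops step by two so i
 * always lands on a value, never on a breakpoint.
 */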

#define DEFAULT_MAX_LOAD 100
u32 get_freq_max_load(int cpu, unsigned int freq)
{
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

        if (!cpumask_test_cpu(cpu, &controlled_cpus))
                return DEFAULT_MAX_LOAD;

        if (have_governor_per_policy()) {
                if (!ppol || !ppol->cached_tunables)
                        return DEFAULT_MAX_LOAD;
                return freq_to_targetload(ppol->cached_tunables, freq);
        }

        if (!cached_common_tunables)
                return DEFAULT_MAX_LOAD;
        return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            &pcpu->p_nolim, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            &pcpu->p_nolim, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            &pcpu->p_nolim, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}
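
/*
 * Worked example (table and numbers assumed for illustration): with a
 * frequency table of {300, 600, 900, 1200} MHz and a uniform target load
 * of 90, loadadjfreq = 81000 (90% load at 900 MHz, in MHz*percent) gives
 * loadadjfreq / tl = 900, so the search settles on 900 MHz in one pass.
 * With loadadjfreq = 100000 the quotient is ~1111 and CPUFREQ_RELATION_L
 * selects 1200 MHz, the lowest step that keeps projected load at or below
 * the target. The freqmin/freqmax bounds only matter when target_loads
 * differ per frequency, where they stop the search from oscillating
 * between two candidate steps.
 */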

static u64 update_load(int cpu)
{
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                ppol->policy->governor_data;
        u64 now;
        u64 now_idle;
        u64 delta_idle;
        u64 delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (now_idle - pcpu->time_in_idle);
        delta_time = (now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * ppol->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}
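
/*
 * Accounting note: cputime_speedadj accumulates active time scaled by the
 * frequency it ran at, so dividing the sum by elapsed time later yields an
 * average "speed demand". Example with assumed numbers: 10ms active at
 * 1200 MHz over a 20ms window gives 12000/20 = 600, i.e. the work done
 * would fully occupy a 600 MHz CPU; multiplied by 100 in the timer path,
 * this becomes the load-adjusted frequency fed to choose_freq().
 */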

static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
                                   unsigned long busy)
{
        int prev_load;
        struct cpufreq_interactive_tunables *tunables =
                ppol->policy->governor_data;

        prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
                              busy, tunables->timer_rate);
        return prev_load;
}
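
/*
 * sl_busy_to_laf() converts scheduler-reported busy time into the same
 * load-adjusted-frequency unit the idle-time path produces:
 * laf = max_freq * 100 * busy / timer_rate. Example (assumed values):
 * busy = 10000us of a 20000us window on a 1500 MHz CPU gives
 * 1500 * 100 * 10000 / 20000 = 75000, i.e. 50% of max capacity.
 */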

#define NEW_TASK_RATIO 75
#define PRED_TOLERANCE_PCT 10
static void cpufreq_interactive_timer(unsigned long data)
{
        s64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        int pol_load = 0;
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                ppol->policy->governor_data;
        struct sched_load *sl = ppol->sl;
        struct cpufreq_interactive_cpuinfo *pcpu;
        unsigned int new_freq;
        unsigned int prev_laf = 0, t_prevlaf;
        unsigned int pred_laf = 0, t_predlaf = 0;
        unsigned int prev_chfreq, pred_chfreq, chosen_freq;
        unsigned int index;
        unsigned long flags;
        unsigned long max_cpu;
        int cpu, i;
        int new_load_pct = 0;
        int prev_l, pred_l = 0;
        struct cpufreq_govinfo govinfo;
        bool skip_hispeed_logic, skip_min_sample_time;
        bool jump_to_max_no_ts = false;
        bool jump_to_max = false;
        bool start_hyst = true;

        if (!down_read_trylock(&ppol->enable_sem))
                return;
        if (!ppol->governor_enabled)
                goto exit;

        now = ktime_to_us(ktime_get());

        spin_lock_irqsave(&ppol->target_freq_lock, flags);
        spin_lock(&ppol->load_lock);

        skip_hispeed_logic =
                tunables->ignore_hispeed_on_notif && ppol->notif_pending;
        skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
        ppol->notif_pending = false;
        now = ktime_to_us(ktime_get());
        ppol->last_evaluated_jiffy = get_jiffies_64();

        if (tunables->use_sched_load)
                sched_get_cpus_busy(sl, ppol->policy->cpus);
        max_cpu = cpumask_first(ppol->policy->cpus);
        i = 0;
        for_each_cpu(cpu, ppol->policy->cpus) {
                pcpu = &per_cpu(cpuinfo, cpu);
                if (tunables->use_sched_load) {
                        t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
                        prev_l = t_prevlaf / ppol->target_freq;
                        if (tunables->enable_prediction) {
                                t_predlaf = sl_busy_to_laf(ppol,
                                                sl[i].predicted_load);
                                pred_l = t_predlaf / ppol->target_freq;
                        }
                        if (sl[i].prev_load)
                                new_load_pct = sl[i].new_task_load * 100 /
                                                        sl[i].prev_load;
                        else
                                new_load_pct = 0;
                } else {
                        now = update_load(cpu);
                        delta_time = (unsigned int)
                                (now - pcpu->cputime_speedadj_timestamp);
                        if (WARN_ON_ONCE(!delta_time))
                                continue;
                        cputime_speedadj = pcpu->cputime_speedadj;
                        do_div(cputime_speedadj, delta_time);
                        t_prevlaf = (unsigned int)cputime_speedadj * 100;
                        prev_l = t_prevlaf / ppol->target_freq;
                }

                /* find max of loadadjfreq inside policy */
                if (t_prevlaf > prev_laf) {
                        prev_laf = t_prevlaf;
                        max_cpu = cpu;
                }
                pred_laf = max(t_predlaf, pred_laf);

                cpu_load = max(prev_l, pred_l);
                pol_load = max(pol_load, cpu_load);
                trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
                                                  prev_l, pred_l);

                /* save loadadjfreq for notification */
                pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);

                /* detect heavy new task and jump to policy->max */
                if (prev_l >= tunables->go_hispeed_load &&
                    new_load_pct >= NEW_TASK_RATIO) {
                        skip_hispeed_logic = true;
                        jump_to_max = true;
                }
                i++;
        }
        spin_unlock(&ppol->load_lock);

        tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

        prev_chfreq = choose_freq(ppol, prev_laf);
        pred_chfreq = choose_freq(ppol, pred_laf);
        chosen_freq = max(prev_chfreq, pred_chfreq);

        if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
                if (!jump_to_max)
                        jump_to_max_no_ts = true;

        if (now - ppol->max_freq_hyst_start_time <
            tunables->max_freq_hysteresis &&
            pol_load >= tunables->go_hispeed_load &&
            ppol->target_freq < ppol->policy->max) {
                skip_hispeed_logic = true;
                skip_min_sample_time = true;
                if (!jump_to_max)
                        jump_to_max_no_ts = true;
        }

        new_freq = chosen_freq;
        if (jump_to_max_no_ts || jump_to_max) {
                new_freq = ppol->policy->cpuinfo.max_freq;
        } else if (!skip_hispeed_logic) {
                if (pol_load >= tunables->go_hispeed_load ||
                    tunables->boosted) {
                        if (ppol->target_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                        else
                                new_freq = max(new_freq,
                                               tunables->hispeed_freq);
                }
        }

        if (now - ppol->max_freq_hyst_start_time <
            tunables->max_freq_hysteresis) {
                if (new_freq < ppol->policy->max &&
                    ppol->policy->max <= tunables->hispeed_freq)
                        start_hyst = false;
                new_freq = max(tunables->hispeed_freq, new_freq);
        }

        if (!skip_hispeed_logic &&
            ppol->target_freq >= tunables->hispeed_freq &&
            new_freq > ppol->target_freq &&
            now - ppol->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        max_cpu, pol_load, ppol->target_freq,
                        ppol->policy->cur, new_freq);
                spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
                goto rearm;
        }

        ppol->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(&ppol->p_nolim, ppol->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index)) {
                spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
                goto rearm;
        }

        new_freq = ppol->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
                if (now - ppol->floor_validate_time <
                    tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                max_cpu, pol_load, ppol->target_freq,
                                ppol->policy->cur, new_freq);
                        spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off). If policy->max is restored
         * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
         * could incorrectly extend the duration of max_freq_hysteresis by
         * min_sample_time.
         */

        if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
            && !jump_to_max_no_ts) {
                ppol->floor_freq = new_freq;
                ppol->floor_validate_time = now;
        }

        if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts)
                ppol->max_freq_hyst_start_time = now;

        if (ppol->target_freq == new_freq &&
            ppol->target_freq <= ppol->policy->cur) {
                trace_cpufreq_interactive_already(
                        max_cpu, pol_load, ppol->target_freq,
                        ppol->policy->cur, new_freq);
                spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
                goto rearm;
        }

        trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
                                         ppol->policy->cur, new_freq);

        ppol->target_freq = new_freq;
        spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(max_cpu, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process_no_notif(speedchange_task);

rearm:
        if (!timer_pending(&ppol->policy_timer))
                cpufreq_interactive_timer_resched(data, false);

        /*
         * Send govinfo notification.
         * Govinfo notification could potentially wake up another thread
         * managed by its clients. Thread wakeups might trigger a load
         * change callback that executes this function again. Therefore
         * no spinlock could be held when sending the notification.
         */
        for_each_cpu(i, ppol->policy->cpus) {
                pcpu = &per_cpu(cpuinfo, i);
                govinfo.cpu = i;
                govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
                govinfo.sampling_rate_us = tunables->timer_rate;
                atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
                                           CPUFREQ_LOAD_CHANGE, &govinfo);
        }

exit:
        up_read(&ppol->enable_sem);
        return;
}

static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_policyinfo *ppol;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        ppol = per_cpu(polinfo, cpu);
                        if (!down_read_trylock(&ppol->enable_sem))
                                continue;
                        if (!ppol->governor_enabled) {
                                up_read(&ppol->enable_sem);
                                continue;
                        }

                        if (ppol->target_freq != ppol->policy->cur)
                                __cpufreq_driver_target(ppol->policy,
                                                        ppol->target_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     ppol->target_freq,
                                                     ppol->policy->cur);
                        up_read(&ppol->enable_sem);
                }
        }

        return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_policyinfo *ppol;

        tunables->boosted = true;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

        for_each_online_cpu(i) {
                ppol = per_cpu(polinfo, i);
                if (!ppol || tunables != ppol->policy->governor_data)
                        continue;

                spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
                if (ppol->target_freq < tunables->hispeed_freq) {
                        ppol->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        ppol->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                ppol->floor_freq = tunables->hispeed_freq;
                ppol->floor_validate_time = ktime_to_us(ktime_get());
                spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
                break;
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

        if (anyboost)
                wake_up_process_no_notif(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        unsigned long cpu = (unsigned long) data;
        struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
        struct cpufreq_interactive_tunables *tunables;
        unsigned long flags;

        if (!ppol || ppol->reject_notification)
                return 0;

        if (!down_read_trylock(&ppol->enable_sem))
                return 0;
        if (!ppol->governor_enabled)
                goto exit;

        tunables = ppol->policy->governor_data;
        if (!tunables->use_sched_load || !tunables->use_migration_notif)
                goto exit;

        spin_lock_irqsave(&ppol->target_freq_lock, flags);
        ppol->notif_pending = true;
        ppol->notif_cpu = cpu;
        spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

        if (!hrtimer_is_queued(&ppol->notif_timer))
                hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
                              HRTIMER_MODE_REL);
exit:
        up_read(&ppol->enable_sem);
        return 0;
}

static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
{
        struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
                        struct cpufreq_interactive_policyinfo, notif_timer);
        int cpu;

        if (!down_read_trylock(&ppol->enable_sem))
                return HRTIMER_NORESTART;
        if (!ppol->governor_enabled) {
                up_read(&ppol->enable_sem);
                return HRTIMER_NORESTART;
        }
        cpu = ppol->notif_cpu;
        trace_cpufreq_interactive_load_change(cpu);
        del_timer(&ppol->policy_timer);
        del_timer(&ppol->policy_slack_timer);
        cpufreq_interactive_timer(cpu);

        up_read(&ppol->enable_sem);
        return HRTIMER_NORESTART;
}

static struct notifier_block load_notifier_block = {
        .notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_policyinfo *ppol;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                ppol = per_cpu(polinfo, freq->cpu);
                if (!ppol)
                        return 0;
                if (!down_read_trylock(&ppol->enable_sem))
                        return 0;
                if (!ppol->governor_enabled) {
                        up_read(&ppol->enable_sem);
                        return 0;
                }

                if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
                        up_read(&ppol->enable_sem);
                        return 0;
                }
                spin_lock_irqsave(&ppol->load_lock, flags);
                for_each_cpu(cpu, ppol->policy->cpus)
                        update_load(cpu);
                spin_unlock_irqrestore(&ppol->load_lock, flags);

                up_read(&ppol->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}
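
/*
 * Input format note: the sysfs writers below feed strings such as
 * "85 1500000:90" (an illustrative value, not a recommendation) through
 * get_tokenized_data(), which accepts space- or colon-separated decimal
 * tokens and insists on an odd count so that every frequency breakpoint
 * is bracketed by values. Example from a shell, with the exact path
 * depending on whether the kernel uses per-policy governor directories:
 *   echo "85 1500000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 */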

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

        sched_update_freq_max_load(&controlled_cpus);

        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

#define show_store_one(file_name)                                       \
static ssize_t show_##file_name(                                        \
        struct cpufreq_interactive_tunables *tunables, char *buf)       \
{                                                                       \
        return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);   \
}                                                                       \
static ssize_t store_##file_name(                                       \
        struct cpufreq_interactive_tunables *tunables,                  \
        const char *buf, size_t count)                                  \
{                                                                       \
        int ret;                                                        \
        unsigned long val;                                              \
                                                                        \
        ret = kstrtoul(buf, 0, &val);                                   \
        if (ret < 0)                                                    \
                return ret;                                             \
        tunables->file_name = val;                                      \
        return count;                                                   \
}
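
/*
 * For reference, each invocation below expands to a matching pair of sysfs
 * accessors; e.g. show_store_one(max_freq_hysteresis) generates
 * show_max_freq_hysteresis() and store_max_freq_hysteresis() operating on
 * tunables->max_freq_hysteresis.
 */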
show_store_one(max_freq_hysteresis);
show_store_one(align_windows);
show_store_one(ignore_hispeed_on_notif);
show_store_one(fast_ramp_down);
show_store_one(enable_prediction);

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val, val_round;
        struct cpufreq_interactive_tunables *t;
        int cpu;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        val_round = jiffies_to_usecs(usecs_to_jiffies(val));
        if (val != val_round)
                pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
                        val_round);
        tunables->timer_rate = val_round;

        if (!tunables->use_sched_load)
                return count;

        for_each_possible_cpu(cpu) {
                if (!per_cpu(polinfo, cpu))
                        continue;
                t = per_cpu(polinfo, cpu)->cached_tunables;
                if (t && t->use_sched_load)
                        t->timer_rate = val_round;
        }
        set_window_helper(tunables);

        return count;
}
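
/*
 * Rounding example (HZ assumed to be 100 for illustration): writing 25000
 * to timer_rate gives usecs_to_jiffies(25000) = 3 jiffies, and
 * jiffies_to_usecs(3) = 30000, so the stored rate becomes 30000us and the
 * warning above reports the rounded value. Rates that are already jiffy
 * multiples are stored unchanged.
 */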

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}
|
|
|
|
|
2013-05-16 14:58:54 +05:30
|
|
|
static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
|
2012-04-23 20:42:41 -07:00
|
|
|
char *buf)
|
|
|
|
{
|
2013-05-16 14:58:54 +05:30
|
|
|
return sprintf(buf, "%d\n", tunables->boost_val);
|
2012-04-23 20:42:41 -07:00
|
|
|
}
|
|
|
|
|
2013-05-16 14:58:54 +05:30
|
|
|
static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
|
2012-04-23 20:42:41 -07:00
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned long val;
|
|
|
|
|
|
|
|
ret = kstrtoul(buf, 0, &val);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2013-05-16 14:58:54 +05:30
|
|
|
tunables->boost_val = val;
|
2012-04-23 20:42:41 -07:00
|
|
|
|
2013-05-16 14:58:54 +05:30
|
|
|
if (tunables->boost_val) {
|
2012-05-03 00:16:55 -07:00
|
|
|
trace_cpufreq_interactive_boost("on");
|
2014-12-02 17:20:50 -08:00
|
|
|
if (!tunables->boosted)
|
|
|
|
cpufreq_interactive_boost(tunables);
|
2012-05-03 00:16:55 -07:00
|
|
|
} else {
|
2014-04-09 16:47:59 -07:00
|
|
|
tunables->boostpulse_endtime = ktime_to_us(ktime_get());
|
2012-05-03 00:16:55 -07:00
|
|
|
trace_cpufreq_interactive_unboost("off");
|
|
|
|
}
|
2012-04-23 20:42:41 -07:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}
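
/*
 * boostpulse is the one-shot variant: any write arms a boost that expires
 * boostpulse_duration_val microseconds from now. A typical (illustrative)
 * consumer is an input-event handler:
 *
 *	echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */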

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(polinfo, cpu))
			continue;
		t = per_cpu(polinfo, cpu)->cached_tunables;
		if (t && t->use_sched_load)
			t->io_is_busy = val;
	}
	sched_set_io_is_busy(val);

	return count;
}
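
/*
 * io_is_busy chooses whether time spent in iowait is counted as busy time
 * when per-CPU load is computed. Like timer_rate, the setting is fanned out
 * to every policy's cached tunables and handed to the scheduler when
 * use_sched_load is set, so all policies account iowait the same way.
 */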

static int cpufreq_interactive_enable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	int rc = 0, j;
	struct cpufreq_interactive_tunables *t;

	mutex_lock(&sched_lock);

	set_window_count++;
	if (set_window_count > 1) {
		for_each_possible_cpu(j) {
			if (!per_cpu(polinfo, j))
				continue;
			t = per_cpu(polinfo, j)->cached_tunables;
			if (t && t->use_sched_load) {
				tunables->timer_rate = t->timer_rate;
				tunables->io_is_busy = t->io_is_busy;
				break;
			}
		}
	} else {
		rc = set_window_helper(tunables);
		if (rc) {
			pr_err("%s: Failed to set sched window\n", __func__);
			set_window_count--;
			goto out;
		}
		sched_set_io_is_busy(tunables->io_is_busy);
	}

	if (!tunables->use_migration_notif)
		goto out;

	migration_register_count++;
	if (migration_register_count == 1)
		atomic_notifier_chain_register(&load_alert_notifier_head,
					       &load_notifier_block);
out:
	mutex_unlock(&sched_lock);
	return rc;
}
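
/*
 * set_window_count and migration_register_count are module-wide refcounts:
 * only the first instance to enable scheduler input configures the sched
 * window and registers the load-alert notifier. Every later instance
 * instead inherits timer_rate and io_is_busy from an instance already
 * using scheduler load, since all instances must share a single sampling
 * window.
 */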

static int cpufreq_interactive_disable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	mutex_lock(&sched_lock);

	if (tunables->use_migration_notif) {
		migration_register_count--;
		if (migration_register_count < 1)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	set_window_count--;

	mutex_unlock(&sched_lock);
	return 0;
}

static ssize_t show_use_sched_load(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
}

static ssize_t store_use_sched_load(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_sched_load == (bool) val)
		return count;

	tunables->use_sched_load = val;

	if (val)
		ret = cpufreq_interactive_enable_sched_input(tunables);
	else
		ret = cpufreq_interactive_disable_sched_input(tunables);

	if (ret) {
		tunables->use_sched_load = !val;
		return ret;
	}

	return count;
}

static ssize_t show_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			tunables->use_migration_notif);
}

static ssize_t store_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_migration_notif == (bool) val)
		return count;
	tunables->use_migration_notif = val;

	if (!tunables->use_sched_load)
		return count;

	mutex_lock(&sched_lock);
	if (val) {
		migration_register_count++;
		if (migration_register_count == 1)
			atomic_notifier_chain_register(
					&load_alert_notifier_head,
					&load_notifier_block);
	} else {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	mutex_unlock(&sched_lock);

	return count;
}
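
/*
 * The bookkeeping above mirrors the enable/disable_sched_input() pair: the
 * notifier chain is registered only on the 0 -> 1 transition of
 * migration_register_count and unregistered on the final 1 -> 0 transition.
 * Nothing is touched unless use_sched_load is set, as migration
 * notifications are only consumed on the sched-load path.
 */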

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
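
/*
 * For reference, show_store_gov_pol_sys(timer_rate) expands (modulo
 * whitespace) to the four wrappers sketched below; the _gov_sys pair
 * forwards to the single common_tunables instance while the _gov_pol pair
 * uses the tunables hung off the policy:
 *
 *	static ssize_t show_timer_rate_gov_sys(struct kobject *kobj,
 *			struct attribute *attr, char *buf)
 *	{ return show_timer_rate(common_tunables, buf); }
 *
 *	static ssize_t show_timer_rate_gov_pol(struct cpufreq_policy *policy,
 *			char *buf)
 *	{ return show_timer_rate(policy->governor_data, buf); }
 *
 *	static ssize_t store_timer_rate_gov_sys(struct kobject *kobj,
 *			struct attribute *attr, const char *buf, size_t count)
 *	{ return store_timer_rate(common_tunables, buf, count); }
 *
 *	static ssize_t store_timer_rate_gov_pol(struct cpufreq_policy *policy,
 *			const char *buf, size_t count)
 *	{ return store_timer_rate(policy->governor_data, buf, count); }
 */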

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
show_store_gov_pol_sys(use_sched_load);
show_store_gov_pol_sys(use_migration_notif);
show_store_gov_pol_sys(max_freq_hysteresis);
show_store_gov_pol_sys(align_windows);
show_store_gov_pol_sys(ignore_hispeed_on_notif);
show_store_gov_pol_sys(fast_ramp_down);
show_store_gov_pol_sys(enable_prediction);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
gov_sys_attr_rw(_name);							\
gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(use_sched_load);
gov_sys_pol_attr_rw(use_migration_notif);
gov_sys_pol_attr_rw(max_freq_hysteresis);
gov_sys_pol_attr_rw(align_windows);
gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
gov_sys_pol_attr_rw(fast_ramp_down);
gov_sys_pol_attr_rw(enable_prediction);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
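
/*
 * boostpulse is declared by hand instead of via gov_sys_pol_attr_rw()
 * because it is write-only: mode 0200 with a NULL show routine, matching
 * the store-only store_gov_pol_sys(boostpulse) wrapper above.
 */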

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	&use_sched_load_gov_sys.attr,
	&use_migration_notif_gov_sys.attr,
	&max_freq_hysteresis_gov_sys.attr,
	&align_windows_gov_sys.attr,
	&ignore_hispeed_on_notif_gov_sys.attr,
	&fast_ramp_down_gov_sys.attr,
	&enable_prediction_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	&use_sched_load_gov_pol.attr,
	&use_migration_notif_gov_pol.attr,
	&max_freq_hysteresis_gov_pol.attr,
	&align_windows_gov_pol.attr,
	&ignore_hispeed_on_notif_gov_pol.attr,
	&fast_ramp_down_gov_pol.attr,
	&enable_prediction_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
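
/*
 * The slack timer's only job is to bring an idle CPU out of sleep so the
 * deferrable policy timer gets a chance to run; by the time this handler
 * executes the wakeup has already happened, hence the intentionally empty
 * body.
 */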

static struct cpufreq_interactive_tunables *alloc_tunable(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return ERR_PTR(-ENOMEM);

	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	return tunables;
}

static struct cpufreq_interactive_policyinfo *get_policyinfo(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_policyinfo *ppol =
		per_cpu(polinfo, policy->cpu);
	int i;
	struct sched_load *sl;

	/* polinfo already allocated for policy, return */
	if (ppol)
		return ppol;

	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
	if (!ppol)
		return ERR_PTR(-ENOMEM);

	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
		     GFP_KERNEL);
	if (!sl) {
		kfree(ppol);
		return ERR_PTR(-ENOMEM);
	}
	ppol->sl = sl;

	init_timer_deferrable(&ppol->policy_timer);
	ppol->policy_timer.function = cpufreq_interactive_timer;
	init_timer(&ppol->policy_slack_timer);
	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
	spin_lock_init(&ppol->load_lock);
	spin_lock_init(&ppol->target_freq_lock);
	init_rwsem(&ppol->enable_sem);

	for_each_cpu(i, policy->related_cpus)
		per_cpu(polinfo, i) = ppol;
	return ppol;
}
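
/*
 * A single policyinfo is shared by every CPU in policy->related_cpus; the
 * final loop points each per-CPU slot at the same allocation. That aliasing
 * is why free_policyinfo() below must clear every slot that references the
 * structure before freeing it.
 */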

/* This function is not multithread-safe. */
static void free_policyinfo(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	int j;

	if (!ppol)
		return;

	/* Clear every per-CPU alias of ppol, not just the caller's slot,
	 * so a later call for a sibling CPU cannot free it again.
	 */
	for_each_possible_cpu(j)
		if (per_cpu(polinfo, j) == ppol)
			per_cpu(polinfo, j) = NULL;
	kfree(ppol->cached_tunables);
	kfree(ppol->sl);
	kfree(ppol);
}

static struct cpufreq_interactive_tunables *get_tunables(
		struct cpufreq_interactive_policyinfo *ppol)
{
	if (have_governor_per_policy())
		return ppol->cached_tunables;
	else
		return cached_common_tunables;
}

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	struct cpufreq_interactive_policyinfo *ppol;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	BUG_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ppol = get_policyinfo(policy);
		if (IS_ERR(ppol))
			return PTR_ERR(ppol);

		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			cpumask_or(&controlled_cpus, &controlled_cpus,
				   policy->related_cpus);
			sched_update_freq_max_load(policy->related_cpus);
			policy->governor_data = tunables;
			return 0;
		}

		tunables = get_tunables(ppol);
		if (!tunables) {
			tunables = alloc_tunable(policy);
			if (IS_ERR(tunables))
				return PTR_ERR(tunables);
		}

		tunables->usage_count = 1;
		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized)
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		cpumask_or(&controlled_cpus, &controlled_cpus,
			   policy->related_cpus);
		sched_update_freq_max_load(policy->related_cpus);

		if (have_governor_per_policy())
			ppol->cached_tunables = tunables;
		else
			cached_common_tunables = tunables;

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		cpumask_andnot(&controlled_cpus, &controlled_cpus,
			       policy->related_cpus);
		sched_update_freq_max_load(cpu_possible_mask);
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1)
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			common_tunables = NULL;
		}

		policy->governor_data = NULL;

		if (tunables->use_sched_load)
			cpufreq_interactive_disable_sched_input(tunables);

		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		ppol = per_cpu(polinfo, policy->cpu);
		ppol->policy = policy;
		ppol->target_freq = policy->cur;
		ppol->freq_table = freq_table;
		ppol->p_nolim = *policy;
		ppol->p_nolim.min = policy->cpuinfo.min_freq;
		ppol->p_nolim.max = policy->cpuinfo.max_freq;
		ppol->floor_freq = ppol->target_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		ppol->hispeed_validate_time = ppol->floor_validate_time;
		ppol->min_freq = policy->min;
		ppol->reject_notification = true;
		ppol->notif_pending = false;
		down_write(&ppol->enable_sem);
		del_timer_sync(&ppol->policy_timer);
		del_timer_sync(&ppol->policy_slack_timer);
		ppol->policy_timer.data = policy->cpu;
		ppol->last_evaluated_jiffy = get_jiffies_64();
		cpufreq_interactive_timer_start(tunables, policy->cpu);
		ppol->governor_enabled = 1;
		up_write(&ppol->enable_sem);
		ppol->reject_notification = false;

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);

		ppol = per_cpu(polinfo, policy->cpu);
		ppol->reject_notification = true;
		down_write(&ppol->enable_sem);
		ppol->governor_enabled = 0;
		ppol->target_freq = 0;
		del_timer_sync(&ppol->policy_timer);
		del_timer_sync(&ppol->policy_slack_timer);
		up_write(&ppol->enable_sem);
		ppol->reject_notification = false;

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		ppol = per_cpu(polinfo, policy->cpu);

		__cpufreq_driver_target(policy,
				ppol->target_freq, CPUFREQ_RELATION_L);

		down_read(&ppol->enable_sem);
		if (ppol->governor_enabled) {
			if (policy->min < ppol->min_freq)
				cpufreq_interactive_timer_resched(policy->cpu,
								  true);
			ppol->min_freq = policy->min;
		}
		up_read(&ppol->enable_sem);

		break;
	}
	return 0;
}
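
/*
 * Event flow: POLICY_INIT allocates (or revives cached) tunables and
 * publishes the sysfs group; GOV_START snapshots the policy into the shared
 * policyinfo and arms the timers under enable_sem; GOV_STOP tears them down
 * symmetrically; GOV_LIMITS re-applies the clamped target and, when the
 * floor has dropped, forces an immediate re-evaluation; POLICY_EXIT drops
 * the tunables refcount and removes the sysfs group with the last user.
 */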

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static int __init cpufreq_interactive_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int ret = 0;

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	mutex_init(&sched_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process_no_notif(speedchange_task);

	ret = cpufreq_register_governor(&cpufreq_gov_interactive);
	if (ret) {
		kthread_stop(speedchange_task);
		put_task_struct(speedchange_task);
	}
	return ret;
}
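
/*
 * The speedchange worker is created before the governor is registered so a
 * GOV_START arriving right after registration always finds a live kthread.
 * It runs SCHED_FIFO at MAX_RT_PRIO-1 so frequency ramp requests are not
 * starved by the very load that triggered them; on registration failure
 * the thread is reaped again.
 */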

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	int cpu;

	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	for_each_possible_cpu(cpu)
		free_policyinfo(cpu);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");