sched/cpufreq_sched: Consolidated update

Contains:

sched/cpufreq_sched: use shorter throttle for raising OPP

Avoid cases where a brief drop in load causes a change to a low OPP
for the full throttle period. Use a shorter throttle period for
raising OPP than for lowering OPP (see the sketch after the commit
metadata below).

sched-freq: Fix handling of max/min frequency

This reverts commit 9726142608f5b3bf5df4280243c9d324e692a510.

Change-Id: Ia78095354f7ad9492f00deb509a2b45112361eda

sched/cpufreq: Increasing throttle_down_nsec to 50ms

Change-Id: I2d8969cf2a64fa719b9dd86f43f9dd14b1ff84fe

sched-freq: make throttle times tunable

Change-Id: I127879645367425b273441d7f0306bb15d5633cb

Signed-off-by: Srinath Sridharan <srinathsr@google.com>
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
[jstultz: Fwdported to 4.4]
Signed-off-by: John Stultz <john.stultz@linaro.org>
commit 24884e5434 (parent f0ba6a5d0c)
Author:    Srinath Sridharan  2016-08-01 11:34:05 +01:00
Committed: Amit Pundir
2 files changed, 160 insertions(+), 17 deletions(-)
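
The asymmetric throttle described in the first sub-commit reduces, in
essence, to picking a direction-dependent deadline. A minimal sketch,
using the patch's gov_data field names (the helper itself is
illustrative, not part of the patch):

/* Pick the throttle deadline based on the direction of the request:
 * raising the OPP is held off only for the short up_throttle window,
 * lowering it for the longer down_throttle window.
 */
static bool throttled(struct gov_data *gd, unsigned int cur_freq)
{
	ktime_t deadline = gd->requested_freq < cur_freq ?
		gd->down_throttle : gd->up_throttle;

	return ktime_before(ktime_get(), deadline);
}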

drivers/cpufreq/Kconfig

@@ -114,7 +114,7 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
config CPU_FREQ_DEFAULT_GOV_SCHED
bool "sched"
select CPU_FREQ_GOV_SCHED
select CPU_FREQ_GOV_INTERACTIVE
help
Use the CPUfreq governor 'sched' as default. This scales
cpu frequency using CPU utilization estimates from the

kernel/sched/cpufreq_sched.c

@@ -19,7 +19,8 @@
#include "sched.h"
#define THROTTLE_NSEC 50000000 /* 50ms default */
#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */
#define THROTTLE_UP_NSEC 500000 /* 500us default */
struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
static bool __read_mostly cpufreq_driver_slow;
@@ -33,8 +34,10 @@ DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
/**
* gov_data - per-policy data internal to the governor
* @throttle: next throttling period expiry. Derived from throttle_nsec
* @throttle_nsec: throttle period length in nanoseconds
* @up_throttle: next throttling period expiry if increasing OPP
* @down_throttle: next throttling period expiry if decreasing OPP
* @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
* @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
* @task: worker thread for dvfs transition that may block/sleep
* @irq_work: callback used to wake up worker thread
* @requested_freq: last frequency requested by the sched governor
@@ -48,11 +51,14 @@ DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
* call down_write(policy->rwsem).
*/
struct gov_data {
ktime_t throttle;
unsigned int throttle_nsec;
ktime_t up_throttle;
ktime_t down_throttle;
unsigned int up_throttle_nsec;
unsigned int down_throttle_nsec;
struct task_struct *task;
struct irq_work irq_work;
unsigned int requested_freq;
int max;
};
static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
@@ -66,25 +72,29 @@ static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
__cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
gd->throttle = ktime_add_ns(ktime_get(), gd->throttle_nsec);
gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
up_write(&policy->rwsem);
}
static bool finish_last_request(struct gov_data *gd)
static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
{
ktime_t now = ktime_get();
if (ktime_after(now, gd->throttle))
ktime_t throttle = gd->requested_freq < cur_freq ?
gd->down_throttle : gd->up_throttle;
if (ktime_after(now, throttle))
return false;
while (1) {
int usec_left = ktime_to_ns(ktime_sub(gd->throttle, now));
int usec_left = ktime_to_ns(ktime_sub(throttle, now));
usec_left /= NSEC_PER_USEC;
trace_cpufreq_sched_throttled(usec_left);
usleep_range(usec_left, usec_left + 100);
now = ktime_get();
if (ktime_after(now, gd->throttle))
if (ktime_after(now, throttle))
return true;
}
}
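
With the default windows above (THROTTLE_UP_NSEC = 500000 ns and
THROTTLE_DOWN_NSEC = 50000000 ns), and assuming the driver reports no
transition latency, this loop sleeps at most ~500 us before a raise in
OPP goes through, but up to ~50 ms before a drop does; that asymmetry
is what keeps a brief dip in load from parking the CPU at a low OPP.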
@@ -128,7 +138,7 @@ static int cpufreq_sched_thread(void *data)
* if the frequency thread sleeps while waiting to be
* unthrottled, start over to check for a newer request
*/
if (finish_last_request(gd))
if (finish_last_request(gd, policy->cur))
continue;
last_request = new_request;
cpufreq_sched_try_driver_target(policy, new_request);
@@ -183,16 +193,21 @@ static void update_fdomain_capacity_request(int cpu)
}
/* Convert the new maximum capacity request into a cpu frequency */
freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
freq_new = capacity * gd->max >> SCHED_CAPACITY_SHIFT;
if (cpufreq_frequency_table_target(policy, policy->freq_table,
freq_new, CPUFREQ_RELATION_L,
&index_new))
goto out;
freq_new = policy->freq_table[index_new].frequency;
if (freq_new > policy->max)
freq_new = policy->max;
if (freq_new < policy->min)
freq_new = policy->min;
trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
gd->requested_freq);
if (freq_new == gd->requested_freq)
goto out;
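
A worked (hypothetical) instance of the conversion above, for a
capacity request of 768 out of SCHED_CAPACITY_SCALE (1024) on a policy
whose highest observed max is 1600000 kHz:

	freq_new = 768 * 1600000 >> SCHED_CAPACITY_SHIFT; /* 1200000 kHz */

The raw value is then snapped to a real table entry with
CPUFREQ_RELATION_L and clamped to the policy's current [min, max]
window by the checks above.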
@@ -246,10 +261,17 @@ static inline void clear_sched_freq(void)
static_key_slow_dec(&__sched_freq);
}
static struct attribute_group sched_attr_group_gov_pol;
static struct attribute_group *get_sysfs_attr(void)
{
return &sched_attr_group_gov_pol;
}
static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
{
struct gov_data *gd;
int cpu;
int rc;
for_each_cpu(cpu, policy->cpus)
memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
@@ -259,11 +281,20 @@ static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
if (!gd)
return -ENOMEM;
gd->throttle_nsec = policy->cpuinfo.transition_latency ?
gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
policy->cpuinfo.transition_latency :
THROTTLE_NSEC;
THROTTLE_UP_NSEC;
gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
pr_debug("%s: throttle threshold = %u [ns]\n",
__func__, gd->throttle_nsec);
__func__, gd->up_throttle_nsec);
gd->max = policy->max;
rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr());
if (rc) {
pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
goto err;
}
if (cpufreq_driver_is_slow()) {
cpufreq_driver_slow = true;
@@ -301,6 +332,8 @@ static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
put_task_struct(gd->task);
}
sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr());
policy->governor_data = NULL;
kfree(gd);
@@ -317,6 +350,32 @@ static int cpufreq_sched_start(struct cpufreq_policy *policy)
return 0;
}
static void cpufreq_sched_limits(struct cpufreq_policy *policy)
{
struct gov_data *gd;
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
policy->cpu, policy->min, policy->max,
policy->cur);
if (!down_write_trylock(&policy->rwsem))
return;
/*
* Need to keep track of highest max frequency for
* capacity calculations
*/
gd = policy->governor_data;
if (gd->max < policy->max)
gd->max = policy->max;
if (policy->max < policy->cur)
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
else if (policy->min > policy->cur)
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
up_write(&policy->rwsem);
}
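
Note what tracking gd->max buys here: update_fdomain_capacity_request()
converts capacity to frequency against the highest max ever seen, so a
temporary cap on policy->max (a thermal limit, say) does not rescale
every capacity request; the cap itself is still honored by the explicit
clamp to policy->max before a request is issued.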
static int cpufreq_sched_stop(struct cpufreq_policy *policy)
{
int cpu;
@@ -340,11 +399,95 @@ static int cpufreq_sched_setup(struct cpufreq_policy *policy,
case CPUFREQ_GOV_STOP:
return cpufreq_sched_stop(policy);
case CPUFREQ_GOV_LIMITS:
cpufreq_sched_limits(policy);
break;
}
return 0;
}
/* Tunables */
static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
{
return sprintf(buf, "%u\n", gd->up_throttle_nsec);
}
static ssize_t store_up_throttle_nsec(struct gov_data *gd,
const char *buf, size_t count)
{
int ret;
long unsigned int val;
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
gd->up_throttle_nsec = val;
return count;
}
static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
{
return sprintf(buf, "%u\n", gd->down_throttle_nsec);
}
static ssize_t store_down_throttle_nsec(struct gov_data *gd,
const char *buf, size_t count)
{
int ret;
long unsigned int val;
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
gd->down_throttle_nsec = val;
return count;
}
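
Once cpufreq_sched_policy_init() has registered the attribute group,
these handlers back two writable sysfs files. On a 4.4-era kernel they
typically appear under the policy's cpufreq directory, e.g.
/sys/devices/system/cpu/cpu0/cpufreq/sched/up_throttle_nsec and
.../down_throttle_nsec (the exact parent depends on
get_governor_parent_kobj()); writing a nanosecond value takes effect
from the next frequency request onward.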
/*
* Create show/store routines
* - sys: One governor instance for complete SYSTEM
* - pol: One governor instance per struct cpufreq_policy
*/
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
return show_##file_name(policy->governor_data, buf); \
}
#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_pol \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
return store_##file_name(policy->governor_data, buf, count); \
}
#define gov_pol_attr_rw(_name) \
static struct freq_attr _name##_gov_pol = \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
#define show_store_gov_pol_sys(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name)
#define tunable_handlers(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name); \
gov_pol_attr_rw(file_name)
tunable_handlers(down_throttle_nsec);
tunable_handlers(up_throttle_nsec);
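
Expanded by hand from the macro definitions above,
tunable_handlers(up_throttle_nsec) expands (modulo whitespace) to:

static ssize_t show_up_throttle_nsec_gov_pol
(struct cpufreq_policy *policy, char *buf)
{
	return show_up_throttle_nsec(policy->governor_data, buf);
}

static ssize_t store_up_throttle_nsec_gov_pol
(struct cpufreq_policy *policy, const char *buf, size_t count)
{
	return store_up_throttle_nsec(policy->governor_data, buf, count);
}

static struct freq_attr up_throttle_nsec_gov_pol =
	__ATTR(up_throttle_nsec, 0644, show_up_throttle_nsec_gov_pol,
	       store_up_throttle_nsec_gov_pol);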
/* Per policy governor instance */
static struct attribute *sched_attributes_gov_pol[] = {
&up_throttle_nsec_gov_pol.attr,
&down_throttle_nsec_gov_pol.attr,
NULL,
};
static struct attribute_group sched_attr_group_gov_pol = {
.attrs = sched_attributes_gov_pol,
.name = "sched",
};
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
static
#endif