cpufreq: governors: Remove code redundancy between governors
With the inclusion of the following patches:

  9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
  772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy between the conservative and ondemand governors is
introduced again, so get rid of it.

[rjw: Changelog]

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8eeed09566
commit 4447266b84

4 changed files with 62 additions and 106 deletions
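Before the diffs, a note on what the shared logic actually does: when several CPUs share one frequency policy, each CPU runs its own sampling timer, but only one of them per half-sampling-interval should recompute the load. The following is a minimal userspace model of that rate-limiting idea, assuming nothing from the kernel tree; model_need_load_eval(), last_sample_us and now_us() are names invented for this sketch, not the kernel symbols.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Invented stand-in for the shared timestamp kept in the policy
 * leader's per-CPU data (cdbs->time_stamp in the real code). */
static long long last_sample_us;

static long long now_us(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Model of the factored-out decision: on a shared policy, skip the
 * load evaluation if less than half a sampling interval has passed
 * since any CPU in the policy last sampled; otherwise refresh the
 * shared timestamp and go ahead.
 */
static bool model_need_load_eval(bool policy_shared,
                                 unsigned int sampling_rate_us)
{
        if (policy_shared) {
                long long t = now_us();

                if (t - last_sample_us < (long long)(sampling_rate_us / 2))
                        return false;
                last_sample_us = t;
        }
        return true;
}

int main(void)
{
        /* Two timers of a shared policy firing back to back: only the
         * first one gets to evaluate load. */
        printf("first CPU evaluates:  %d\n", model_need_load_eval(true, 100000));
        printf("second CPU evaluates: %d\n", model_need_load_eval(true, 100000));
        return 0;
}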
drivers/cpufreq/cpufreq_conservative.c

@@ -111,58 +111,24 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
-{
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
-
-	if (sample)
-		dbs_check_cpu(&cs_dbs_data, cpu);
-
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct cs_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(cs_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	cs_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
 static void cs_dbs_timer(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
 	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
 			struct cs_cpu_dbs_info_s, cdbs.work.work);
+	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
+	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		cs_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		cs_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
+		dbs_check_cpu(&cs_dbs_data, cpu);
+
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		void *data)
 {
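One detail of the rewritten cs_dbs_timer() that is easy to miss: it looks up the per-CPU data for cur_policy->cpu, the policy leader, not for the CPU the work item runs on, so all CPUs in a shared policy take the same timer_mutex and rate-limit against the same timestamp. A rough userspace sketch of that indexing scheme, with per_cpu_state, policy_cpu and this_cpu invented for illustration:

#include <stdio.h>

#define NCPUS 4

/* Invented per-CPU slot; the real analogue is per_cpu(cs_cpu_dbs_info, cpu). */
struct cpu_state {
        long long time_stamp;
};
static struct cpu_state per_cpu_state[NCPUS];

int main(void)
{
        int policy_cpu = 0;     /* leader of a shared policy covering CPUs 0-1 */
        int this_cpu = 1;       /* the timer happened to fire on CPU 1 */

        /* Index by the leader, as the patched timers do, so both CPUs
         * coordinate through one slot instead of each using its own: */
        struct cpu_state *core = &per_cpu_state[policy_cpu];

        core->time_stamp = 12345;
        printf("CPU %d wrote leader CPU %d's stamp: %lld\n",
               this_cpu, policy_cpu, per_cpu_state[policy_cpu].time_stamp);
        return 0;
}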
drivers/cpufreq/cpufreq_governor.c

@@ -177,6 +177,25 @@ static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
 	cancel_delayed_work_sync(&cdbs->work);
 }
 
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate)
+{
+	if (policy_is_shared(cdbs->cur_policy)) {
+		ktime_t time_now = ktime_get();
+		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+		/* Do nothing if we recently have sampled */
+		if (delta_us < (s64)(sampling_rate / 2))
+			return false;
+		else
+			cdbs->time_stamp = time_now;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event)
 {
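The threshold in need_load_eval() can be checked deterministically by restating the comparison as a pure function. eval_decision() below is an invented restatement for testing only, assuming the same "sample once at least half an interval has elapsed" rule as the helper above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented pure restatement of the shared-policy decision in
 * need_load_eval(): evaluate load only when at least half a
 * sampling interval has elapsed since the last evaluation. */
static bool eval_decision(int64_t delta_us, unsigned int sampling_rate_us)
{
        return delta_us >= (int64_t)(sampling_rate_us / 2);
}

int main(void)
{
        assert(!eval_decision(0, 100000));      /* just sampled: skip */
        assert(!eval_decision(49999, 100000));  /* under half an interval: skip */
        assert(eval_decision(50000, 100000));   /* exactly half: evaluate */
        assert(eval_decision(100000, 100000));  /* full interval: evaluate */
        puts("need_load_eval boundary checks hold");
        return 0;
}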
drivers/cpufreq/cpufreq_governor.h

@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
 #endif /* _CPUFREQ_GOVERNER_H */
drivers/cpufreq/cpufreq_ondemand.c

@@ -216,75 +216,44 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	}
 }
 
-static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
-{
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay, sample_type = dbs_info->sample_type;
-
-	/* Common NORMAL_SAMPLE setup */
-	dbs_info->sample_type = OD_NORMAL_SAMPLE;
-	if (sample_type == OD_SUB_SAMPLE) {
-		delay = dbs_info->freq_lo_jiffies;
-		if (sample)
-			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-						dbs_info->freq_lo,
-						CPUFREQ_RELATION_H);
-	} else {
-		if (sample)
-			dbs_check_cpu(&od_dbs_data, cpu);
-		if (dbs_info->freq_lo) {
-			/* Setup timer for SUB_SAMPLE */
-			dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = dbs_info->freq_hi_jiffies;
-		} else {
-			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* dbs_info->rate_mult);
-		}
-	}
-
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct od_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(od_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	od_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
 static void od_dbs_timer(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
 	struct od_cpu_dbs_info_s *dbs_info =
 		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+			cpu);
+	int delay, sample_type = core_dbs_info->sample_type;
+	bool eval_load;
 
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		od_timer_coordinated(dbs_info, dw);
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	eval_load = need_load_eval(&core_dbs_info->cdbs,
+			od_tuners.sampling_rate);
+
+	/* Common NORMAL_SAMPLE setup */
+	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	if (sample_type == OD_SUB_SAMPLE) {
+		delay = core_dbs_info->freq_lo_jiffies;
+		if (eval_load)
+			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+					core_dbs_info->freq_lo,
+					CPUFREQ_RELATION_H);
 	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		od_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		if (eval_load)
+			dbs_check_cpu(&od_dbs_data, cpu);
+		if (core_dbs_info->freq_lo) {
+			/* Setup timer for SUB_SAMPLE */
+			core_dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = core_dbs_info->freq_hi_jiffies;
+		} else {
+			delay = delay_for_sampling_rate(od_tuners.sampling_rate
+					* core_dbs_info->rate_mult);
+		}
 	}
+
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/
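The ondemand timer keeps its extra wrinkle: when powersave_bias has split a frequency request into a high and a low leg, the timer alternates between a NORMAL_SAMPLE firing (at freq_hi, for freq_hi_jiffies) and a SUB_SAMPLE firing (dropping to freq_lo, for freq_lo_jiffies). A small deterministic model of that sequencing, with od_model and od_timer_fire() invented for this sketch:

#include <stdio.h>

enum sample_type { NORMAL_SAMPLE, SUB_SAMPLE };

/* Invented state mirroring the fields od_dbs_timer() touches. */
struct od_model {
        enum sample_type sample_type;
        int freq_lo;            /* nonzero when a request was split in two */
        int freq_lo_jiffies;
        int freq_hi_jiffies;
        int normal_delay;       /* plain sampling period, in jiffies */
};

/* One timer firing: returns the delay until the next firing,
 * mirroring the NORMAL_SAMPLE/SUB_SAMPLE sequencing above. */
static int od_timer_fire(struct od_model *m)
{
        enum sample_type prev = m->sample_type;

        m->sample_type = NORMAL_SAMPLE;         /* common NORMAL_SAMPLE setup */
        if (prev == SUB_SAMPLE)
                return m->freq_lo_jiffies;      /* second leg: run at freq_lo */
        if (m->freq_lo) {
                m->sample_type = SUB_SAMPLE;    /* arm the second leg */
                return m->freq_hi_jiffies;      /* first leg: run at freq_hi */
        }
        return m->normal_delay;                 /* no split: plain sampling */
}

int main(void)
{
        struct od_model m = { NORMAL_SAMPLE, 1, 3, 7, 10 };

        printf("delay after first firing:  %d jiffies (hi leg)\n", od_timer_fire(&m));
        printf("delay after second firing: %d jiffies (lo leg)\n", od_timer_fire(&m));
        return 0;
}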