cpufreq: deoneplusify and remove PM QOS changes

Drop the OnePlus-specific PM QoS cpufreq machinery: the per-cluster
min/max notifiers and limit workqueue in the MSM cpufreq driver, the
fingerprint-IRQ and LCD-unblank frequency boosts layered on top of it,
and the helpers they exported through cpufreq.h.

Change-Id: I5a4fc49a9238d996cfe372a82e8988df3cfe0e30
Authored by Pranav Vashi on 2018-12-30 04:24:12 +05:30; committed by Daniel Hillenbrand
parent ef6786d280
commit 6fe8b6e686
5 changed files with 5 additions and 376 deletions
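For reference, the hooks removed here were built on the kernel's PM QoS
notifier API. A minimal sketch of that pattern follows (hedged:
PM_QOS_C0_CPUFREQ_MIN and MIN_CPUFREQ are the OnePlus-added class and value
this commit deletes, and the handler body is a placeholder; only
pm_qos_add_notifier(), pm_qos_add_request() and the notifier plumbing are
stock kernel API):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static struct pm_qos_request boost_req;

/* Called whenever the aggregate value of the QoS class changes. */
static int boost_qos_handler(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	/* 'val' is the new aggregate request; the removed driver code
	 * decoded it into a cpufreq table index at this point. */
	return NOTIFY_OK;
}

static struct notifier_block boost_qos_nb = {
	.notifier_call = boost_qos_handler,
};

static int __init boost_init(void)
{
	/* PM_QOS_C0_CPUFREQ_MIN was a OnePlus-specific QoS class, not upstream. */
	pm_qos_add_notifier(PM_QOS_C0_CPUFREQ_MIN, &boost_qos_nb);
	pm_qos_add_request(&boost_req, PM_QOS_C0_CPUFREQ_MIN, MIN_CPUFREQ);
	return 0;
}
late_initcall(boost_init);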

@@ -42,32 +42,6 @@ struct cpufreq_suspend_t {
};
static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
#define LITTLE_CPU_QOS_FREQ 1900800
#define BIG_CPU_QOS_FREQ 2361600
unsigned int cluster1_first_cpu;
static bool qos_cpufreq_flag;
static bool c1_cpufreq_update_flag;
static void c0_cpufreq_limit(struct work_struct *work);
static void c1_cpufreq_limit(struct work_struct *work);
static struct workqueue_struct *qos_cpufreq_work_queue;
static DECLARE_WORK(c0_cpufreq_limit_work, c0_cpufreq_limit);
static DECLARE_WORK(c1_cpufreq_limit_work, c1_cpufreq_limit);
struct qos_request_value {
bool flag;
unsigned int max_cpufreq;
unsigned int min_cpufreq;
};
static struct qos_request_value c0_qos_request_value = {
.flag = false,
.max_cpufreq = INT_MAX,
.min_cpufreq = MIN_CPUFREQ,
};
static struct qos_request_value c1_qos_request_value = {
.flag = false,
.max_cpufreq = INT_MAX,
.min_cpufreq = MIN_CPUFREQ,
};
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
unsigned int index)
@@ -103,22 +77,14 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
if (target_freq == policy->cur) {
if (c1_cpufreq_update_flag)
c1_cpufreq_update_flag = false;
else
goto done;
}
if (target_freq == policy->cur)
goto done;
if (per_cpu(suspend_data, policy->cpu).device_suspended) {
if (likely(qos_cpufreq_flag)) {
qos_cpufreq_flag = false;
} else {
pr_debug("cpufreq: cpu%d scheduling frequency change "
pr_debug("cpufreq: cpu%d scheduling frequency change "
"in suspend.\n", policy->cpu);
ret = -EFAULT;
goto done;
}
ret = -EFAULT;
goto done;
}
table = cpufreq_frequency_get_table(policy->cpu);
@@ -128,17 +94,6 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
ret = -ENODEV;
goto done;
}
if (cluster1_first_cpu) {
if (policy->cpu >= cluster1_first_cpu) {
target_freq = min(c1_qos_request_value.max_cpufreq, target_freq);
target_freq = max(c1_qos_request_value.min_cpufreq, target_freq);
} else {
target_freq = min(c0_qos_request_value.max_cpufreq, target_freq);
target_freq = max(c0_qos_request_value.min_cpufreq, target_freq);
}
}
if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
&index)) {
pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
@@ -488,10 +443,6 @@ static int __init msm_cpufreq_probe(struct platform_device *pdev)
devm_kfree(dev, ftbl);
}
ftbl = per_cpu(freq_table, cpu - 1);
} else {
if (!IS_ERR(ftbl))
cluster1_first_cpu = cpu;
//pr_info("cluster1_first_cpu: %d",cluster1_first_cpu);
}
per_cpu(freq_table, cpu) = ftbl;
}
@@ -512,231 +463,6 @@ static struct platform_driver msm_cpufreq_plat_driver = {
},
};
static int get_c0_available_cpufreq(void)
{
unsigned int max_cpufreq_index, min_cpufreq_index;
unsigned int max_index;
unsigned int index_max, index_min;
struct cpufreq_frequency_table *table, *pos;
table = cpufreq_frequency_get_table(0);
if (!table) {
pr_err("cpufreq: Failed to get frequency table for CPU%u\n",0);
return -EINVAL;
}
max_cpufreq_index = (unsigned int)pm_qos_request(PM_QOS_C0_CPUFREQ_MAX);
min_cpufreq_index = (unsigned int)pm_qos_request(PM_QOS_C0_CPUFREQ_MIN);
/* a min request above the max raises the max */
if (min_cpufreq_index > max_cpufreq_index)
max_cpufreq_index = min_cpufreq_index;
/* find the index of the last valid entry in the table */
cpufreq_for_each_valid_entry(pos, table) {
max_index = pos - table;
}
if (max_cpufreq_index & MASK_CPUFREQ) {
index_max = MAX_CPUFREQ - max_cpufreq_index;
if (index_max> max_index)
index_max = 0;
index_max = max_index - index_max;
} else {
if (max_cpufreq_index > max_index)
index_max = max_index;
}
if (min_cpufreq_index & MASK_CPUFREQ) {
index_min = MAX_CPUFREQ - min_cpufreq_index;
if (index_min > max_index)
index_min = 0;
index_min = max_index - index_min;
} else {
if (min_cpufreq_index > max_index)
index_min = max_index;
}
c0_qos_request_value.max_cpufreq = table[index_max].frequency;
c0_qos_request_value.min_cpufreq = table[index_min].frequency;
pr_debug("::: m:%d, ii:%d-, mm:%d-",max_index, index_min,index_max);
return 0;
}
static int get_c1_available_cpufreq(void)
{
unsigned int max_cpufreq_index, min_cpufreq_index;
unsigned int max_index;
unsigned int index_max, index_min;
struct cpufreq_frequency_table *table, *pos;
table = cpufreq_frequency_get_table(cluster1_first_cpu);
if (!table) {
pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
cluster1_first_cpu);
return -EINVAL;
}
max_cpufreq_index = (unsigned int)pm_qos_request(PM_QOS_C1_CPUFREQ_MAX);
min_cpufreq_index = (unsigned int)pm_qos_request(PM_QOS_C1_CPUFREQ_MIN);
/* a min request above the max raises the max */
if (min_cpufreq_index > max_cpufreq_index)
max_cpufreq_index = min_cpufreq_index;
/* find the index of the last valid entry in the table */
cpufreq_for_each_valid_entry(pos, table) {
max_index = pos - table;
}
/* add limits */
if (max_cpufreq_index & MASK_CPUFREQ) {
index_max = MAX_CPUFREQ - max_cpufreq_index;
if (index_max> max_index)
index_max = 0;
index_max = max_index - index_max;
} else {
if (max_cpufreq_index > max_index)
index_max = max_index;
}
if (min_cpufreq_index & MASK_CPUFREQ) {
index_min = MAX_CPUFREQ - min_cpufreq_index;
if (index_min > max_index)
index_min = 0;
index_min = max_index - index_min;
} else {
if (min_cpufreq_index > max_index)
index_min = max_index;
}
c1_qos_request_value.max_cpufreq = table[index_max].frequency;
c1_qos_request_value.min_cpufreq = table[index_min].frequency;
pr_debug("::: m:%d, ii:%d-, mm:%d-",max_index, index_min,index_max);
return 0;
}
static int c0_cpufreq_qos_handler(struct notifier_block *b, unsigned long val, void *v)
{
struct cpufreq_policy *policy;
int ret = -1;
//get_online_cpus();
policy = cpufreq_cpu_get(0);
if (!policy)
return NOTIFY_BAD;
if (!policy->governor) {
cpufreq_cpu_put(policy);
return NOTIFY_BAD;
}
if (strcmp(policy->governor->name, "interactive")) {
cpufreq_cpu_put(policy);
return NOTIFY_OK;
}
ret = get_c0_available_cpufreq();
if (ret) {
cpufreq_cpu_put(policy);
return NOTIFY_BAD;
}
__cpufreq_driver_target(policy,
c0_qos_request_value.min_cpufreq, CPUFREQ_RELATION_H);
cpufreq_cpu_put(policy);
//put_online_cpus();
return NOTIFY_OK;
}
static struct notifier_block c0_cpufreq_qos_notifier = {
.notifier_call = c0_cpufreq_qos_handler,
};
static int c1_cpufreq_qos_handler(struct notifier_block *b, unsigned long val, void *v)
{
struct cpufreq_policy *policy;
int ret = -1;
pr_info(":::update_policy\n");
/* in use, policy may be NULL, because hotplug can close first cpu core*/
//get_online_cpus();
policy = cpufreq_cpu_get(cluster1_first_cpu);
if (!policy)
return NOTIFY_BAD;
if (!policy->governor) {
cpufreq_cpu_put(policy);
return NOTIFY_BAD;
}
if (strcmp(policy->governor->name, "interactive")) {
cpufreq_cpu_put(policy);
return NOTIFY_OK;
}
ret = get_c1_available_cpufreq();
if (ret) {
cpufreq_cpu_put(policy);
return NOTIFY_BAD;
}
c1_cpufreq_update_flag = true;
__cpufreq_driver_target(policy,
c1_qos_request_value.min_cpufreq, CPUFREQ_RELATION_H);
//__cpufreq_driver_target(policy, val, CPUFREQ_RELATION_H);
cpufreq_cpu_put(policy);
//put_online_cpus();
return NOTIFY_OK;
}
static struct notifier_block c1_cpufreq_qos_notifier = {
.notifier_call = c1_cpufreq_qos_handler,
};
static void c0_cpufreq_limit(struct work_struct *work)
{
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(0);
if (policy) {
qos_cpufreq_flag = true;
cpufreq_driver_target(policy,
LITTLE_CPU_QOS_FREQ, CPUFREQ_RELATION_H);
cpufreq_cpu_put(policy);
}
sched_set_boost(1);
}
void c0_cpufreq_limit_queue(void)
{
if (qos_cpufreq_work_queue)
queue_work(qos_cpufreq_work_queue, &c0_cpufreq_limit_work);
}
EXPORT_SYMBOL_GPL(c0_cpufreq_limit_queue);
static void c1_cpufreq_limit(struct work_struct *work)
{
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(cluster1_first_cpu);
if (policy) {
qos_cpufreq_flag = true;
cpufreq_driver_target(policy,
BIG_CPU_QOS_FREQ, CPUFREQ_RELATION_H);
cpufreq_cpu_put(policy);
}
}
void c1_cpufreq_limit_queue(void)
{
if (qos_cpufreq_work_queue)
queue_work(qos_cpufreq_work_queue, &c1_cpufreq_limit_work);
}
EXPORT_SYMBOL_GPL(c1_cpufreq_limit_queue);
static int __init msm_cpufreq_register(void)
{
int cpu, rc;
@@ -756,15 +482,6 @@ static int __init msm_cpufreq_register(void)
suspend_mutex));
return rc;
}
/* add cpufreq qos notify */
pm_qos_add_notifier(PM_QOS_C0_CPUFREQ_MAX, &c0_cpufreq_qos_notifier);
pm_qos_add_notifier(PM_QOS_C0_CPUFREQ_MIN, &c0_cpufreq_qos_notifier);
pm_qos_add_notifier(PM_QOS_C1_CPUFREQ_MAX, &c1_cpufreq_qos_notifier);
pm_qos_add_notifier(PM_QOS_C1_CPUFREQ_MIN, &c1_cpufreq_qos_notifier);
qos_cpufreq_work_queue = create_singlethread_workqueue("qos_cpufreq");
if (qos_cpufreq_work_queue == NULL)
pr_info("%s: failed to create work queue", __func__);
register_pm_notifier(&msm_cpufreq_pm_notifier);
return cpufreq_register_driver(&msm_cpufreq_driver);

@@ -885,7 +885,6 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
"soc:fpc_fpc1020", 16) != NULL ||
strnstr(irq_name, "gf_fp", 6) != NULL) {
fp_irq_cnt = true;
c0_cpufreq_limit_queue();
}
set_resume_wakeup_flag(irq_pin);
pr_warn("hwirq %s [irq_num=%d ]triggered\n",

@@ -39,14 +39,6 @@
#include <asm/fb.h>
#define LCDSPEEDUP_LITTLE_CPU_QOS_FREQ 1900800
#define LCDSPEEDUP_BIG_CPU_QOS_FREQ 2361600
#define LCD_QOS_TIMEOUT 250000
#define NO_BOOST 0
static struct pm_qos_request lcdspeedup_little_cpu_qos;
static struct pm_qos_request lcdspeedup_big_cpu_qos;
/*
* Frame buffer device initialization and setup routines
*/
@@ -1930,78 +1922,3 @@ int fb_new_modelist(struct fb_info *info)
}
MODULE_LICENSE("GPL");
static int fb_state_change(struct notifier_block *nb,
unsigned long val, void *data)
{
struct fb_event *evdata = data;
struct fb_info *info = evdata->info;
unsigned int blank;
if (val != FB_EVENT_BLANK &&
val != FB_EARLY_EVENT_BLANK)
return NOTIFY_OK;
if (info->node)
return NOTIFY_OK;
blank = *(int *)evdata->data;
switch (blank) {
case FB_BLANK_POWERDOWN:
if (val == FB_EARLY_EVENT_BLANK) {
pm_qos_update_request(&lcdspeedup_little_cpu_qos, MIN_CPUFREQ);
pm_qos_update_request(&lcdspeedup_big_cpu_qos, MIN_CPUFREQ);
/* start printing active wakeup sources */
pm_print_active_wakeup_sources_queue(true);
pr_debug("::: LCD start off :::\n");
}
break;
case FB_BLANK_UNBLANK:
if (val == FB_EARLY_EVENT_BLANK) {
struct cpufreq_policy *policy;
/* Speed up LCD on */
/* Fetch little cpu policy and drive the CPU towards target frequency */
pm_qos_update_request_timeout(
&lcdspeedup_little_cpu_qos, MAX_CPUFREQ,
LCD_QOS_TIMEOUT);
/* Fetch big cpu policy and drive big cpu towards target frequency */
policy = cpufreq_cpu_get(cluster1_first_cpu);
if (policy) {
cpufreq_driver_target(policy, LCDSPEEDUP_BIG_CPU_QOS_FREQ, CPUFREQ_RELATION_H);
pm_qos_update_request_timeout(&lcdspeedup_big_cpu_qos, (MAX_CPUFREQ-4), LCD_QOS_TIMEOUT);
} else
return NOTIFY_OK;
cpufreq_cpu_put(policy);
}
if (val == FB_EVENT_BLANK) {
/* sched_boost is enabled at wakeup and disabled once the screen is on */
sched_set_boost(NO_BOOST);
/* stop printing active wakeup sources */
pm_print_active_wakeup_sources_queue(false);
pr_debug("::: LCD is on :::\n");
}
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block fb_block = {
.notifier_call = fb_state_change,
.priority = 1,
};
static int __init lcdscreen_speedup_init_pm_qos(void)
{
fb_register_client(&fb_block);
pm_qos_add_request(&lcdspeedup_little_cpu_qos, PM_QOS_C0_CPUFREQ_MIN, MIN_CPUFREQ);
pm_qos_add_request(&lcdspeedup_big_cpu_qos, PM_QOS_C1_CPUFREQ_MIN, MIN_CPUFREQ);
return 0;
}
late_initcall(lcdscreen_speedup_init_pm_qos);

@@ -154,10 +154,7 @@ struct cpufreq_policy {
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
extern unsigned int cluster1_first_cpu;
extern bool fp_irq_cnt;
extern void c0_cpufreq_limit_queue(void);
extern void c1_cpufreq_limit_queue(void);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{

@@ -202,7 +202,6 @@ void thaw_fingerprintd(void)
pm_nosig_freezing = false;
if (fp_irq_cnt) {
fp_irq_cnt = false;
c1_cpufreq_limit_queue();
}
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
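
With the vendor hooks gone, a comparable clamp can still be expressed through
the stock 4.x-era cpufreq policy notifier. A minimal sketch under that
assumption (the 1900800 kHz ceiling is lifted from the removed
LITTLE_CPU_QOS_FREQ define purely as an illustration, not a recommendation):

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Re-apply a frequency ceiling whenever cpufreq re-evaluates a policy. */
static int clamp_policy_notify(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_OK;

	/* Illustrative: cap the cluster containing CPU0 at 1.9 GHz. */
	if (policy->cpu == 0)
		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					     1900800);

	return NOTIFY_OK;
}

static struct notifier_block clamp_nb = {
	.notifier_call = clamp_policy_notify,
};

static int __init clamp_init(void)
{
	return cpufreq_register_notifier(&clamp_nb, CPUFREQ_POLICY_NOTIFIER);
}
late_initcall(clamp_init);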