soc: qcom: msm_perf: Add sysfs node to report cpu hotplug events
cpufreq governor tunables can be updated only while a CPU is online. Userspace may need to apply and restore these tunables depending on power/perf needs. Provide a sysfs node that userspace can poll and that is notified on CPU_ONLINE and CPU_DEAD events for any CPU. This mechanism lets userspace learn when CPUs come online so it can apply or restore cpufreq governor tunables. The CPU_DEAD notification is provided for completeness. Change-Id: I221a97c49a72da217e5e4b46a16a26042ef5c83f Signed-off-by: Omprakash Dhyade <odhyade@codeaurora.org>
This commit is contained in:
parent
c022987819
commit
025f0650f0
1 changed files with 115 additions and 0 deletions
|
@ -81,6 +81,7 @@ static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
|
|||
static unsigned int num_online_managed(struct cpumask *mask);
|
||||
static int init_cluster_control(void);
|
||||
static int rm_high_pwr_cost_cpus(struct cluster *cl);
|
||||
static int init_events_group(void);
|
||||
|
||||
static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
|
||||
|
||||
|
@ -93,6 +94,15 @@ struct load_stats {
|
|||
unsigned int cpu_load;
|
||||
};
|
||||
static DEFINE_PER_CPU(struct load_stats, cpu_load_stats);
|
||||
|
||||
/*
 * Tracks whether a CPU hotplug event (CPU_ONLINE/CPU_DEAD) is pending
 * delivery to userspace through the "events/cpu_hotplug" sysfs node.
 */
struct events {
	spinlock_t cpu_hotplug_lock;	/* protects cpu_hotplug flag */
	bool cpu_hotplug;	/* true when an event awaits sysfs_notify() */
	bool init_success;	/* set once init_events_group() has finished */
};
static struct events events_group;
/* kthread that delivers sysfs_notify() for pending hotplug events */
static struct task_struct *events_notify_thread;

/* one millisecond, expressed in microseconds */
#define LAST_UPDATE_TOL USEC_PER_MSEC
|
||||
|
||||
/* Bitmask to keep track of the workloads being detected */
|
||||
|
@ -1082,6 +1092,25 @@ static struct attribute_group attr_group = {
|
|||
.attrs = attrs,
|
||||
};
|
||||
|
||||
/* CPU Hotplug */
|
||||
static struct kobject *events_kobj;
|
||||
|
||||
static ssize_t show_cpu_hotplug(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "\n");
|
||||
}
|
||||
/* Read-only attribute: userspace polls this file and is woken via
 * sysfs_notify() when a CPU goes online or dies. */
static struct kobj_attribute cpu_hotplug_attr =
__ATTR(cpu_hotplug, 0444, show_cpu_hotplug, NULL);

/* All attributes exposed under the "events" kobject */
static struct attribute *events_attrs[] = {
	&cpu_hotplug_attr.attr,
	NULL,
};

static struct attribute_group events_attr_group = {
	.attrs = events_attrs,
};
/*******************************sysfs ends************************************/
|
||||
|
||||
static unsigned int num_online_managed(struct cpumask *mask)
|
||||
|
@ -1185,6 +1214,54 @@ static int notify_userspace(void *data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void hotplug_notify(int action)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!events_group.init_success)
|
||||
return;
|
||||
|
||||
if ((action == CPU_ONLINE) || (action == CPU_DEAD)) {
|
||||
spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);
|
||||
events_group.cpu_hotplug = true;
|
||||
spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);
|
||||
wake_up_process(events_notify_thread);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Kthread main loop: sleeps until hotplug_notify() flags a pending CPU
 * hotplug event, then pokes userspace pollers via sysfs_notify() on the
 * "cpu_hotplug" attribute.
 *
 * TASK_INTERRUPTIBLE is set *before* the pending-flag check so that a
 * wake_up_process() landing between the check and schedule() is not lost.
 */
static int events_notify_userspace(void *data)
{
	unsigned long flags;
	bool notify_change;

	while (1) {

		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&(events_group.cpu_hotplug_lock), flags);

		/* Nothing pending: drop the lock and sleep until woken. */
		if (!events_group.cpu_hotplug) {
			spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock),
				flags);

			schedule();
			if (kthread_should_stop())
				break;
			spin_lock_irqsave(&(events_group.cpu_hotplug_lock),
				flags);
		}

		set_current_state(TASK_RUNNING);
		/* Consume the pending flag under the lock. */
		notify_change = events_group.cpu_hotplug;
		events_group.cpu_hotplug = false;
		spin_unlock_irqrestore(&(events_group.cpu_hotplug_lock), flags);

		if (notify_change)
			sysfs_notify(events_kobj, NULL, "cpu_hotplug");
	}

	return 0;
}
|
||||
|
||||
static void check_cluster_iowait(struct cluster *cl, u64 now)
|
||||
{
|
||||
struct load_stats *pcpu_st;
|
||||
|
@ -1635,6 +1712,8 @@ static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
|
|||
unsigned int i;
|
||||
struct cluster *i_cl = NULL;
|
||||
|
||||
hotplug_notify(action);
|
||||
|
||||
if (!clusters_inited)
|
||||
return NOTIFY_OK;
|
||||
|
||||
|
@ -1783,6 +1862,40 @@ static int init_cluster_control(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int init_events_group(void)
|
||||
{
|
||||
int ret;
|
||||
struct kobject *module_kobj;
|
||||
|
||||
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
|
||||
if (!module_kobj) {
|
||||
pr_err("msm_perf: Couldn't find module kobject\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
events_kobj = kobject_create_and_add("events", module_kobj);
|
||||
if (!events_kobj) {
|
||||
pr_err("msm_perf: Failed to add events_kobj\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = sysfs_create_group(events_kobj, &events_attr_group);
|
||||
if (ret) {
|
||||
pr_err("msm_perf: Failed to create sysfs\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
spin_lock_init(&(events_group.cpu_hotplug_lock));
|
||||
events_notify_thread = kthread_run(events_notify_userspace,
|
||||
NULL, "msm_perf:events_notify");
|
||||
if (IS_ERR(events_notify_thread))
|
||||
return PTR_ERR(events_notify_thread);
|
||||
|
||||
events_group.init_success = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init msm_performance_init(void)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
@ -1795,6 +1908,8 @@ static int __init msm_performance_init(void)
|
|||
|
||||
register_cpu_notifier(&msm_performance_cpu_notifier);
|
||||
|
||||
init_events_group();
|
||||
|
||||
return 0;
|
||||
}
|
||||
late_initcall(msm_performance_init);
|
||||
|
|
Loading…
Add table
Reference in a new issue