soc: qcom: Add a msm_performance module

For power-saving reasons we sometimes want to keep fewer CPUs online
without adversely affecting performance for certain real-world use
cases. This module provides that hotplug support to userspace: it makes
a best effort to keep a userspace-specified number of CPUs online.
Any userspace entity can tell the module which CPUs it wants managed
and, of those, how many should be kept online.
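
For illustration, a userspace client would typically drive the module
through its module parameters. The sketch below is not part of this
commit; it assumes the parameters appear at the standard
/sys/module/msm_performance/parameters/ paths:

/* Hypothetical userspace sketch; the paths assume the standard sysfs
 * module-parameter layout for a module named msm_performance. */
#include <stdio.h>

static int write_param(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Hand CPUs 2-3 to the module, then ask it to keep at most one
	 * of them online; the module offlines the rest best-effort. */
	write_param("/sys/module/msm_performance/parameters/managed_cpus",
		    "2-3");
	write_param("/sys/module/msm_performance/parameters/max_cpus", "1");
	return 0;
}

Writing managed_cpus resets the module's record of the CPUs it has
taken offline, and writing max_cpus immediately schedules a
re-evaluation of which managed CPUs to online or offline.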

Change-Id: I82c6d6e998d3740ad6f8c67b47344ce87f328b8b
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
Authored by Rohit Gupta on 2014-10-24 15:03:22 -07:00; committed by David Keitel
parent 5c564b7cca
commit c4d4ef627d
8 changed files with 263 additions and 8 deletions

View file

@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
@@ -26,7 +25,6 @@ CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y

View file

@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
@@ -26,7 +25,6 @@ CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y

View file

@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
@@ -26,7 +25,6 @@ CONFIG_CGROUP_HUGETLB=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y

View file

@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
@@ -26,7 +25,6 @@ CONFIG_CGROUP_HUGETLB=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y

View file

@@ -468,6 +468,13 @@ config MSM_ADSP_LOADER
 	  for the platforms that use APRv2.
 	  Say M if you want to enable this module.
 
+config MSM_PERFORMANCE
+	tristate "Core control driver to support userspace hotplug requests"
+	help
+	  This driver is used to provide CPU hotplug support to userspace.
+	  It ensures that no more than a user specified number of CPUs stay
+	  online at any given point in time.
+
 endif # ARCH_QCOM
 
 config MSM_SUBSYSTEM_RESTART

View file

@@ -40,6 +40,8 @@ obj-$(CONFIG_MSM_CORE_CTL_HELPER) += core_ctl_helper.o
 obj-$(CONFIG_MSM_PFE_WA) += pfe-wa.o
 obj-$(CONFIG_ARCH_MSM8996) += msm_cpu_voltage.o
+obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o
 ifdef CONFIG_MSM_SUBSYSTEM_RESTART
 obj-y += subsystem_notif.o
 obj-y += subsystem_restart.o

View file

@@ -0,0 +1,225 @@
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/cpumask.h>

#include <trace/events/power.h>

/* Delay in jiffies for hotplugging to complete */
#define MIN_HOTPLUG_DELAY 3

/* Number of CPUs to maintain online */
static unsigned int max_cpus;

/* List of CPUs managed by this module */
static struct cpumask managed_cpus;
static struct mutex managed_cpus_lock;

/* To keep track of CPUs that the module decides to offline */
static struct cpumask managed_offline_cpus;

/* Work to evaluate the onlining/offlining CPUs */
struct delayed_work try_hotplug_work;

static unsigned int num_online_managed(void);

static int set_max_cpus(const char *buf, const struct kernel_param *kp)
{
	unsigned int val;

	if (sscanf(buf, "%u\n", &val) != 1)
		return -EINVAL;
	if (val > cpumask_weight(&managed_cpus))
		return -EINVAL;

	max_cpus = val;
	schedule_delayed_work(&try_hotplug_work, 0);
	trace_set_max_cpus(cpumask_bits(&managed_cpus)[0], max_cpus);

	return 0;
}

static int get_max_cpus(char *buf, const struct kernel_param *kp)
{
	return snprintf(buf, PAGE_SIZE, "%u", max_cpus);
}

static const struct kernel_param_ops param_ops_max_cpus = {
	.set = set_max_cpus,
	.get = get_max_cpus,
};

device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);

static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
{
	int ret;

	mutex_lock(&managed_cpus_lock);
	ret = cpulist_parse(buf, &managed_cpus);
	cpumask_clear(&managed_offline_cpus);
	mutex_unlock(&managed_cpus_lock);

	return ret;
}

static int get_managed_cpus(char *buf, const struct kernel_param *kp)
{
	return cpulist_scnprintf(buf, PAGE_SIZE, &managed_cpus);
}

static const struct kernel_param_ops param_ops_managed_cpus = {
	.set = set_managed_cpus,
	.get = get_managed_cpus,
};

device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);

/* To display all the online managed CPUs */
static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
{
	struct cpumask tmp_mask;

	cpumask_clear(&tmp_mask);

	mutex_lock(&managed_cpus_lock);
	cpumask_complement(&tmp_mask, &managed_offline_cpus);
	cpumask_and(&tmp_mask, &managed_cpus, &tmp_mask);
	mutex_unlock(&managed_cpus_lock);

	return cpulist_scnprintf(buf, PAGE_SIZE, &tmp_mask);
}

static const struct kernel_param_ops param_ops_managed_online_cpus = {
	.get = get_managed_online_cpus,
};

device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
			NULL, 0444);

static unsigned int num_online_managed(void)
{
	struct cpumask tmp_mask;

	cpumask_clear(&tmp_mask);
	cpumask_and(&tmp_mask, &managed_cpus, cpu_online_mask);

	return cpumask_weight(&tmp_mask);
}

/*
 * try_hotplug tries to online/offline cores based on the current requirement.
 * It loops through the currently managed CPUs and tries to online/offline
 * them until the max_cpus criteria is met.
 */
static void __ref try_hotplug(struct work_struct *work)
{
	int i;

	if (cpumask_empty(&managed_cpus) || (num_online_managed() == max_cpus))
		return;

	pr_debug("msm_perf: Trying hotplug...%d:%d\n", num_online_managed(),
							num_online_cpus());
	mutex_lock(&managed_cpus_lock);
	if (num_online_managed() > max_cpus) {
		for (i = num_present_cpus() - 1; i >= 0; i--) {
			if (!cpumask_test_cpu(i, &managed_cpus) ||
							!cpu_online(i))
				continue;

			pr_debug("msm_perf: Offlining CPU%d\n", i);
			cpumask_set_cpu(i, &managed_offline_cpus);
			if (cpu_down(i)) {
				cpumask_clear_cpu(i, &managed_offline_cpus);
				pr_debug("msm_perf: Offlining CPU%d failed\n",
						i);
				continue;
			}
			if (num_online_managed() <= max_cpus)
				break;
		}
	} else {
		for_each_cpu(i, &managed_cpus) {
			if (cpu_online(i))
				continue;

			pr_debug("msm_perf: Onlining CPU%d\n", i);
			if (cpu_up(i)) {
				pr_debug("msm_perf: Onlining CPU%d failed\n",
						i);
				continue;
			}
			cpumask_clear_cpu(i, &managed_offline_cpus);
			if (num_online_managed() >= max_cpus)
				break;
		}
	}
	mutex_unlock(&managed_cpus_lock);
}

static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	uint32_t cpu = (uintptr_t)hcpu;

	if (!cpumask_test_cpu(cpu, &managed_cpus))
		return NOTIFY_OK;

	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
		/*
		 * Prevent onlining of a managed CPU if max_cpu criteria is
		 * already satisfied
		 */
		if (max_cpus <= num_online_managed()) {
			pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
			return NOTIFY_BAD;
		}
		cpumask_clear_cpu(cpu, &managed_offline_cpus);
	} else if (!cpumask_test_cpu(cpu, &managed_offline_cpus) &&
					(action == CPU_DEAD)) {
		/*
		 * Schedule a re-evaluation to check if any more CPUs can be
		 * brought online to meet the max_cpus requirement. This work
		 * is delayed to account for CPU hotplug latencies
		 */
		if (schedule_delayed_work(&try_hotplug_work, 0)) {
			trace_reevaluate_hotplug(cpumask_bits(&managed_cpus)[0],
					max_cpus);
			pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
		} else {
			pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block __refdata msm_performance_cpu_notifier = {
	.notifier_call = msm_performance_cpu_callback,
};

static int __init msm_performance_init(void)
{
	INIT_DELAYED_WORK(&try_hotplug_work, try_hotplug);
	mutex_init(&managed_cpus_lock);

	cpumask_clear(&managed_offline_cpus);

	register_cpu_notifier(&msm_performance_cpu_notifier);

	return 0;
}
late_initcall(msm_performance_init);

View file

@@ -765,6 +765,35 @@ TRACE_EVENT(memlat_dev_update,
 		__entry->vote)
 );
+DECLARE_EVENT_CLASS(kpm_module,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus),
+	TP_STRUCT__entry(
+		__field(u32, managed_cpus)
+		__field(u32, max_cpus)
+	),
+	TP_fast_assign(
+		__entry->managed_cpus = managed_cpus;
+		__entry->max_cpus = max_cpus;
+	),
+	TP_printk("managed:%x max_cpus=%u", (unsigned int)__entry->managed_cpus,
+		(unsigned int)__entry->max_cpus)
+);
+DEFINE_EVENT(kpm_module, set_max_cpus,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
+DEFINE_EVENT(kpm_module, reevaluate_hotplug,
+	TP_PROTO(unsigned int managed_cpus, unsigned int max_cpus),
+	TP_ARGS(managed_cpus, max_cpus)
+);
 #endif /* _TRACE_POWER_H */
 /* This part must be outside protection */