soc: qcom: msm_perf: Fix a race condition in hotplug callback

msm_performance relies on userspace to initialize cluster related
variables such as num_clusters, managed_cpus etc. Once num_clusters
is set the cluster related data structures are initialized and
userspace is allowed to set them.
However, there is a race window between num_clusters being set and
the cluster data structures being allocated: hotplug activity during
that window invokes the hotplug callback, which in turn tries to
access managed_cpus. Since managed_cpus might not have been allocated
by that time, this results in a NULL pointer access in the callback,
causing a kernel panic.

Change-Id: Ia40af624322a89e0c0f9598bf7eea059e6969ebe
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
This commit is contained in:
Rohit Gupta 2015-01-07 09:56:22 -08:00 committed by David Keitel
parent a47233f55b
commit 11ff15ea43

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@ struct cpu_hp {
cpumask_var_t offlined_cpus;
};
static struct cpu_hp **managed_clusters;
static bool clusters_inited;
/* Work to evaluate the onlining/offlining CPUs */
struct delayed_work evaluate_hotplug_work;
@@ -86,7 +87,7 @@ static int set_max_cpus(const char *buf, const struct kernel_param *kp)
const char *cp = buf;
int val;
if (!num_clusters)
if (!clusters_inited)
return -EINVAL;
while ((cp = strpbrk(cp + 1, ":")))
@@ -120,7 +121,7 @@ static int get_max_cpus(char *buf, const struct kernel_param *kp)
{
int i, cnt = 0;
if (!num_clusters)
if (!clusters_inited)
return cnt;
for (i = 0; i < num_clusters; i++)
@@ -143,7 +144,7 @@ static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
int i, ret;
struct cpumask tmp_mask;
if (!num_clusters)
if (!clusters_inited)
return -EINVAL;
ret = cpulist_parse(buf, &tmp_mask);
@@ -168,7 +169,7 @@ static int get_managed_cpus(char *buf, const struct kernel_param *kp)
{
int i, cnt = 0;
if (!num_clusters)
if (!clusters_inited)
return cnt;
for (i = 0; i < num_clusters; i++) {
@@ -195,7 +196,7 @@ static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
struct cpumask tmp_mask;
struct cpu_hp *i_cpu_hp;
if (!num_clusters)
if (!clusters_inited)
return cnt;
for (i = 0; i < num_clusters; i++) {
@@ -498,6 +499,9 @@ static void __ref try_hotplug(struct cpu_hp *data)
{
unsigned int i;
if (!clusters_inited)
return;
pr_debug("msm_perf: Trying hotplug...%d:%d\n",
num_online_managed(data->cpus), num_online_cpus());
@@ -590,6 +594,9 @@ static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
unsigned int i;
struct cpu_hp *i_hp = NULL;
if (!clusters_inited)
return NOTIFY_OK;
for (i = 0; i < num_clusters; i++) {
if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
i_hp = managed_clusters[i];
@@ -656,6 +663,7 @@ static int init_cluster_control(void)
INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
mutex_init(&managed_cpus_lock);
clusters_inited = true;
return 0;
}