soc: qcom: msm_perf: Fix a race condition in hotplug callback
msm_performance relies on userspace to initialize cluster-related variables such as num_clusters, managed_cpus, etc. Once num_clusters is set, the cluster-related data structures are initialized and userspace is allowed to set them. However, there could be a race: between num_clusters being set and the cluster data structures being allocated, hotplug activity may occur, which would invoke the hotplug callback, which in turn tries to access managed_cpus. managed_cpus might not have been allocated by that time, resulting in a NULL pointer access in the callback and causing a kernel panic. Change-Id: Ia40af624322a89e0c0f9598bf7eea059e6969ebe Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
This commit is contained in:
parent
a47233f55b
commit
11ff15ea43
1 changed file with 14 additions and 6 deletions
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
|
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
|
||||||
*
|
*
|
||||||
* This program is free software; you can redistribute it and/or modify
|
* This program is free software; you can redistribute it and/or modify
|
||||||
* it under the terms of the GNU General Public License version 2 and
|
* it under the terms of the GNU General Public License version 2 and
|
||||||
|
@ -33,6 +33,7 @@ struct cpu_hp {
|
||||||
cpumask_var_t offlined_cpus;
|
cpumask_var_t offlined_cpus;
|
||||||
};
|
};
|
||||||
static struct cpu_hp **managed_clusters;
|
static struct cpu_hp **managed_clusters;
|
||||||
|
static bool clusters_inited;
|
||||||
|
|
||||||
/* Work to evaluate the onlining/offlining CPUs */
|
/* Work to evaluate the onlining/offlining CPUs */
|
||||||
struct delayed_work evaluate_hotplug_work;
|
struct delayed_work evaluate_hotplug_work;
|
||||||
|
@ -86,7 +87,7 @@ static int set_max_cpus(const char *buf, const struct kernel_param *kp)
|
||||||
const char *cp = buf;
|
const char *cp = buf;
|
||||||
int val;
|
int val;
|
||||||
|
|
||||||
if (!num_clusters)
|
if (!clusters_inited)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
while ((cp = strpbrk(cp + 1, ":")))
|
while ((cp = strpbrk(cp + 1, ":")))
|
||||||
|
@ -120,7 +121,7 @@ static int get_max_cpus(char *buf, const struct kernel_param *kp)
|
||||||
{
|
{
|
||||||
int i, cnt = 0;
|
int i, cnt = 0;
|
||||||
|
|
||||||
if (!num_clusters)
|
if (!clusters_inited)
|
||||||
return cnt;
|
return cnt;
|
||||||
|
|
||||||
for (i = 0; i < num_clusters; i++)
|
for (i = 0; i < num_clusters; i++)
|
||||||
|
@ -143,7 +144,7 @@ static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
|
||||||
int i, ret;
|
int i, ret;
|
||||||
struct cpumask tmp_mask;
|
struct cpumask tmp_mask;
|
||||||
|
|
||||||
if (!num_clusters)
|
if (!clusters_inited)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
ret = cpulist_parse(buf, &tmp_mask);
|
ret = cpulist_parse(buf, &tmp_mask);
|
||||||
|
@ -168,7 +169,7 @@ static int get_managed_cpus(char *buf, const struct kernel_param *kp)
|
||||||
{
|
{
|
||||||
int i, cnt = 0;
|
int i, cnt = 0;
|
||||||
|
|
||||||
if (!num_clusters)
|
if (!clusters_inited)
|
||||||
return cnt;
|
return cnt;
|
||||||
|
|
||||||
for (i = 0; i < num_clusters; i++) {
|
for (i = 0; i < num_clusters; i++) {
|
||||||
|
@ -195,7 +196,7 @@ static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
|
||||||
struct cpumask tmp_mask;
|
struct cpumask tmp_mask;
|
||||||
struct cpu_hp *i_cpu_hp;
|
struct cpu_hp *i_cpu_hp;
|
||||||
|
|
||||||
if (!num_clusters)
|
if (!clusters_inited)
|
||||||
return cnt;
|
return cnt;
|
||||||
|
|
||||||
for (i = 0; i < num_clusters; i++) {
|
for (i = 0; i < num_clusters; i++) {
|
||||||
|
@ -498,6 +499,9 @@ static void __ref try_hotplug(struct cpu_hp *data)
|
||||||
{
|
{
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
|
if (!clusters_inited)
|
||||||
|
return;
|
||||||
|
|
||||||
pr_debug("msm_perf: Trying hotplug...%d:%d\n",
|
pr_debug("msm_perf: Trying hotplug...%d:%d\n",
|
||||||
num_online_managed(data->cpus), num_online_cpus());
|
num_online_managed(data->cpus), num_online_cpus());
|
||||||
|
|
||||||
|
@ -590,6 +594,9 @@ static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
struct cpu_hp *i_hp = NULL;
|
struct cpu_hp *i_hp = NULL;
|
||||||
|
|
||||||
|
if (!clusters_inited)
|
||||||
|
return NOTIFY_OK;
|
||||||
|
|
||||||
for (i = 0; i < num_clusters; i++) {
|
for (i = 0; i < num_clusters; i++) {
|
||||||
if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
|
if (cpumask_test_cpu(cpu, managed_clusters[i]->cpus)) {
|
||||||
i_hp = managed_clusters[i];
|
i_hp = managed_clusters[i];
|
||||||
|
@ -656,6 +663,7 @@ static int init_cluster_control(void)
|
||||||
INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
|
INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
|
||||||
mutex_init(&managed_cpus_lock);
|
mutex_init(&managed_cpus_lock);
|
||||||
|
|
||||||
|
clusters_inited = true;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue