coresight-remote-etm: Add missing lock to avoid race condition
We take drvdata->mutex and then wait on 'drvdata->handle->handle_lock' in the following path:

  remote_etm_disable => qmi_send_req_wait => qmi_encode_and_send_req

So it is possible that, while we wait on the handle lock in the path above, the handle itself gets destroyed via:

  remote_etm_svc_exit => qmi_handle_destroy

and the handle is no longer valid. This patch takes drvdata->mutex in the remaining paths where the handle is used or destroyed.

CRs-Fixed: 2045013
Change-Id: I4a5110630b78e551bab1b4f454d23aacd599c000
Signed-off-by: Mukesh Ojha <mojha@codeaurora.org>
parent 9b3e8a81dc
commit 9021973bac
1 changed file with 7 additions and 4 deletions
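To make the fix easier to follow, here is a minimal userspace sketch of the serialization pattern the patch applies; it is not driver code. The struct drvdata, struct handle, use_handle() and destroy_handle() names below are hypothetical stand-ins for the driver's drvdata, its QMI handle, the handle users (remote_etm_disable(), remote_etm_rcv_msg()) and the teardown path (remote_etm_svc_exit()). The point is only that every user of the handle and the teardown path take the same mutex, and users re-check the pointer after acquiring it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct handle {
        int id;                     /* stand-in for real QMI handle state */
};

struct drvdata {
        pthread_mutex_t mutex;      /* serializes handle use vs. destruction */
        struct handle *handle;
};

/* Analogue of remote_etm_disable()/remote_etm_rcv_msg(): use the handle. */
static void *use_handle(void *arg)
{
        struct drvdata *d = arg;

        for (int i = 0; i < 1000; i++) {
                pthread_mutex_lock(&d->mutex);
                if (d->handle)              /* handle may already be gone   */
                        d->handle->id++;    /* safe: teardown cannot run now */
                pthread_mutex_unlock(&d->mutex);
        }
        return NULL;
}

/* Analogue of remote_etm_svc_exit(): tear the handle down under the lock. */
static void destroy_handle(struct drvdata *d)
{
        pthread_mutex_lock(&d->mutex);
        free(d->handle);
        d->handle = NULL;
        pthread_mutex_unlock(&d->mutex);
}

int main(void)
{
        struct drvdata d = { .mutex = PTHREAD_MUTEX_INITIALIZER };
        pthread_t user;

        d.handle = calloc(1, sizeof(*d.handle));

        pthread_create(&user, NULL, use_handle, &d);
        destroy_handle(&d);         /* may run before, between or after the
                                       user's iterations, but never while the
                                       user holds the mutex */
        pthread_join(user, NULL);

        printf("done, handle is %s\n", d.handle ? "alive" : "destroyed");
        return 0;
}

Built with gcc -pthread, destroy_handle() can never free the handle while use_handle() is inside its critical section, and any later use simply sees handle == NULL; this is the guarantee the added drvdata->mutex locking provides in the driver. The actual change follows.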
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -186,10 +186,12 @@ static void remote_etm_rcv_msg(struct work_struct *work)
         struct remote_etm_drvdata *drvdata = container_of(work,
                                                 struct remote_etm_drvdata,
                                                 work_rcv_msg);
+        mutex_lock(&drvdata->mutex);
         if (qmi_recv_msg(drvdata->handle) < 0)
                 dev_err(drvdata->dev, "%s: Error receiving QMI message\n",
                         __func__);
 
+        mutex_unlock(&drvdata->mutex);
 }
 
 static void remote_etm_notify(struct qmi_handle *handle,
@@ -227,6 +229,7 @@ static void remote_etm_svc_arrive(struct work_struct *work)
                 return;
         }
 
+        mutex_lock(&drvdata->mutex);
         if (qmi_connect_to_service(drvdata->handle, CORESIGHT_QMI_SVC_ID,
                                    CORESIGHT_QMI_VERSION,
                                    drvdata->inst_id) < 0) {
@@ -236,7 +239,6 @@ static void remote_etm_svc_arrive(struct work_struct *work)
                 drvdata->handle = NULL;
         }
 
-        mutex_lock(&drvdata->mutex);
         if (drvdata->inst_id < sizeof(int)*BITS_PER_BYTE
             && (boot_enable & BIT(drvdata->inst_id))) {
                 if (!drvdata->enable)
@@ -251,9 +253,10 @@ static void remote_etm_svc_exit(struct work_struct *work)
         struct remote_etm_drvdata *drvdata = container_of(work,
                                                 struct remote_etm_drvdata,
                                                 work_svc_exit);
+        mutex_lock(&drvdata->mutex);
         qmi_handle_destroy(drvdata->handle);
         drvdata->handle = NULL;
+        mutex_unlock(&drvdata->mutex);
 }
 
 static int remote_etm_svc_event_notify(struct notifier_block *this,