msm: ipa: protect qmi context using mutex

Add a mutex to protect the QMI context, preventing invalid access
to it while it is being freed during SSR cleanup.

Change-Id: I689deaf093909a951a9e5847241ee3938fea240b
Acked-by: Chaitanya Pratapa <cpratapa@qti.qualcomm.com>
Acked-by: David Arinzon <darinzon@qti.qualcomm.com>
Signed-off-by: Sivan Reinstein <sivanr@codeaurora.org>
Signed-off-by: Ravinder Konka <rkonka@codeaurora.org>
Author: Sivan Reinstein
Date: 2015-08-31 16:00:21 +03:00
Committed by: David Keitel
parent b43518d9f5
commit b2d171c18b
3 changed files with 83 additions and 31 deletions
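
At its core, the patch makes every reader of the shared ipa_qmi_ctx pointer take ipa_qmi_lock and check for NULL before dereferencing, while the SSR cleanup path frees the context and clears the pointer under the same lock. A minimal, self-contained sketch of that pattern follows; the names (qmi_ctx, ctx_lock, cache_request, ssr_cleanup) are hypothetical stand-ins, not the driver's actual code.

/*
 * Sketch of the locking pattern this patch applies (hypothetical
 * names; the real code guards ipa_qmi_ctx with ipa_qmi_lock).
 */
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct qmi_ctx {
	int num_cached;
};

static struct qmi_ctx *ctx;	/* shared; freed during SSR cleanup */
static DEFINE_MUTEX(ctx_lock);

static void cache_request(void)
{
	mutex_lock(&ctx_lock);
	if (ctx != NULL)	/* ctx may already have been freed */
		ctx->num_cached++;
	mutex_unlock(&ctx_lock);
}

static void ssr_cleanup(void)
{
	mutex_lock(&ctx_lock);
	vfree(ctx);		/* vfree(NULL) is a harmless no-op */
	ctx = NULL;		/* readers now see NULL and back off */
	mutex_unlock(&ctx_lock);
}

A mutex rather than a spinlock is a natural fit here, presumably because the touched paths all run in process context and the critical sections include sizeable memcpy() calls into the message cache.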

@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,8 +49,10 @@ static bool qmi_modem_init_fin, qmi_indication_fin;
 static struct work_struct ipa_qmi_service_init_work;
 static uint32_t ipa_wan_platform;
 struct ipa_qmi_context *ipa_qmi_ctx;
-static bool workqueues_stopped;
 static bool first_time_handshake;
+static atomic_t workqueues_stopped;
+static atomic_t ipa_qmi_initialized;
+struct mutex ipa_qmi_lock;
 
 /* QMI A5 service */
@@ -320,7 +322,7 @@ static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
 {
 	switch (event) {
 	case QMI_RECV_MSG:
-		if (!workqueues_stopped)
+		if (!atomic_read(&workqueues_stopped))
 			queue_delayed_work(ipa_svc_workqueue,
 				&work_recv_msg, 0);
 		break;
@@ -515,12 +517,17 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
 			req->filter_spec_list_len);
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
-		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
-		req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
-	ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
-	ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	mutex_lock(&ipa_qmi_lock);
+	if (ipa_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+			ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10;
+	}
+	mutex_unlock(&ipa_qmi_lock);
 
 	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
@@ -655,13 +662,17 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
 		return -EINVAL;
 	}
 
-	/* cache the qmi_filter_request */
-	memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
-		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
-		req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
-	ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
-	ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	mutex_lock(&ipa_qmi_lock);
+	if (ipa_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+			ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
+	}
+	mutex_unlock(&ipa_qmi_lock);
 
 	req_desc.max_msg_len =
 		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
 	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
@@ -702,7 +713,7 @@ static void ipa_q6_clnt_notify(struct qmi_handle *handle,
 	switch (event) {
 	case QMI_RECV_MSG:
 		IPAWANDBG("client qmi recv message called");
-		if (!workqueues_stopped)
+		if (!atomic_read(&workqueues_stopped))
 			queue_delayed_work(ipa_clnt_resp_workqueue,
 				&work_recv_msg_client, 0);
 		break;
@@ -765,8 +776,7 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work)
 			IPA_Q6_SERVICE_INS_ID);
 	if (rc < 0) {
 		IPAWANERR("Server not found\n");
-		qmi_handle_destroy(ipa_q6_clnt);
-		ipa_q6_clnt = NULL;
+		ipa_q6_clnt_svc_exit(0);
 		return;
 	}
@@ -823,9 +833,14 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work)
 
 static void ipa_q6_clnt_svc_exit(struct work_struct *work)
 {
-	qmi_handle_destroy(ipa_q6_clnt);
+	mutex_lock(&ipa_qmi_lock);
+
+	if (ipa_q6_clnt)
+		qmi_handle_destroy(ipa_q6_clnt);
+
 	ipa_q6_clnt_reset = 1;
 	ipa_q6_clnt = NULL;
+	mutex_unlock(&ipa_qmi_lock);
 }
@@ -836,12 +851,12 @@ static int ipa_q6_clnt_svc_event_notify(struct notifier_block *this,
 	IPAWANDBG("event %ld\n", code);
 	switch (code) {
 	case QMI_SERVER_ARRIVE:
-		if (!workqueues_stopped)
+		if (!atomic_read(&workqueues_stopped))
 			queue_delayed_work(ipa_clnt_req_workqueue,
 				&work_svc_arrive, 0);
 		break;
 	case QMI_SERVER_EXIT:
-		if (!workqueues_stopped)
+		if (!atomic_read(&workqueues_stopped))
 			queue_delayed_work(ipa_clnt_req_workqueue,
 				&work_svc_exit, 0);
 		break;
@@ -922,6 +937,7 @@ static void ipa_qmi_service_init_worker(struct work_struct *work)
 		goto destroy_clnt_resp_wq;
 	}
 
+	atomic_set(&ipa_qmi_initialized, 1);
 	/* get Q6 service and start send modem-initial to Q6 */
 	IPAWANDBG("wait service available\n");
 	return;
@@ -949,9 +965,9 @@ int ipa_qmi_service_init(uint32_t wan_platform_type)
 	ipa_wan_platform = wan_platform_type;
 	qmi_modem_init_fin = false;
 	qmi_indication_fin = false;
-	workqueues_stopped = false;
+	atomic_set(&workqueues_stopped, 0);
 
-	if (!ipa_svc_handle) {
+	if (0 == atomic_read(&ipa_qmi_initialized)) {
 		INIT_WORK(&ipa_qmi_service_init_work,
 			ipa_qmi_service_init_worker);
 		schedule_work(&ipa_qmi_service_init_work);
@@ -963,7 +979,7 @@ void ipa_qmi_service_exit(void)
 {
 	int ret = 0;
 
-	workqueues_stopped = true;
+	atomic_set(&workqueues_stopped, 1);
 
 	/* qmi-service */
 	if (ipa_svc_handle) {
@@ -984,6 +1000,7 @@ void ipa_qmi_service_exit(void)
 		IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
 			ipa_svc_handle, ret);
 	}
+	ipa_svc_handle = 0;
 
 	/* qmi-client */
@@ -1008,14 +1025,16 @@ void ipa_qmi_service_exit(void)
 		ipa_clnt_resp_workqueue = NULL;
 	}
 
+	mutex_lock(&ipa_qmi_lock);
 	/* clean the QMI msg cache */
 	if (ipa_qmi_ctx != NULL) {
 		vfree(ipa_qmi_ctx);
 		ipa_qmi_ctx = NULL;
 	}
-	ipa_svc_handle = 0;
+	mutex_unlock(&ipa_qmi_lock);
 	qmi_modem_init_fin = false;
 	qmi_indication_fin = false;
+	atomic_set(&ipa_qmi_initialized, 0);
 }
 
 void ipa_qmi_stop_workqueues(void)
@@ -1023,7 +1042,7 @@ void ipa_qmi_stop_workqueues(void)
 	IPAWANDBG("Stopping all QMI workqueues\n");
 
 	/* Stopping all workqueues so new work won't be scheduled */
-	workqueues_stopped = true;
+	atomic_set(&workqueues_stopped, 1);
 
 	/* Making sure that the current scheduled work won't be executed */
 	cancel_delayed_work(&work_recv_msg);
@@ -1178,3 +1197,13 @@ int ipa_qmi_stop_data_qouta(void)
 		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
 		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
 }
+
+void ipa_qmi_init(void)
+{
+	mutex_init(&ipa_qmi_lock);
+}
+
+void ipa_qmi_cleanup(void)
+{
+	mutex_destroy(&ipa_qmi_lock);
+}

@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,7 @@
 	pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
 
 extern struct ipa_qmi_context *ipa_qmi_ctx;
+extern struct mutex ipa_qmi_lock;
 
 struct ipa_qmi_context {
 	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
@@ -157,6 +158,10 @@ int ipa_qmi_stop_data_qouta(void);
 
 void ipa_q6_handshake_complete(bool ssr_bootup);
 
+void ipa_qmi_init(void);
+
+void ipa_qmi_cleanup(void);
+
 #else /* CONFIG_RMNET_IPA */
 
 static inline int ipa_qmi_service_init(uint32_t wan_platform_type)
@@ -262,6 +267,14 @@ static inline int ipa_qmi_stop_data_qouta(void)
 
 static inline void ipa_q6_handshake_complete(bool ssr_bootup) { }
 
+static inline void ipa_qmi_init(void)
+{
+}
+
+static inline void ipa_qmi_cleanup(void)
+{
+}
+
 #endif /* CONFIG_RMNET_IPA */
 
 #endif /* IPA_QMI_SERVICE_H */

@@ -627,7 +627,8 @@ static int wwan_add_ul_flt_rule_to_ipa(void)
 	param->global = false;
 	param->num_rules = (uint8_t)1;
 
-	for (i = 0; i < num_q6_rule; i++) {
+	mutex_lock(&ipa_qmi_lock);
+	for (i = 0; i < num_q6_rule && (ipa_qmi_ctx != NULL); i++) {
 		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
 		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
 		flt_rule_entry.at_rear = true;
@@ -655,12 +656,14 @@ static int wwan_add_ul_flt_rule_to_ipa(void)
 				param->rules[0].flt_rule_hdl;
 		}
 	}
+	mutex_unlock(&ipa_qmi_lock);
 
 	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
 	req->source_pipe_index =
 		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
 	req->install_status = QMI_RESULT_SUCCESS_V01;
 	req->filter_index_list_len = num_q6_rule;
+	mutex_lock(&ipa_qmi_lock);
 	for (i = 0; i < num_q6_rule; i++) {
 		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
 			req->filter_index_list[i].filter_index = num_v4_rule;
@@ -672,6 +675,7 @@ static int wwan_add_ul_flt_rule_to_ipa(void)
 		req->filter_index_list[i].filter_handle =
 			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
 	}
+	mutex_unlock(&ipa_qmi_lock);
 	if (qmi_filter_notify_send(req)) {
 		IPAWANDBG("add filter rule index on A7-RX failed\n");
 		retval = -EFAULT;
@@ -872,7 +876,8 @@ int wwan_update_mux_channel_prop(void)
 	int ret = 0, i;
 	/* install UL filter rules */
 	if (egress_set) {
-		if (ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+		if (ipa_qmi_ctx &&
+			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
 			IPAWANDBG("setup UL filter rules\n");
 			if (a7_ul_flt_set) {
 				IPAWANDBG("del previous UL filter rules\n");
@@ -1450,7 +1455,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (num_q6_rule != 0) {
 			/* already got Q6 UL filter rules*/
-			if (ipa_qmi_ctx->modem_cfg_emb_pipe_flt
+			if (ipa_qmi_ctx &&
+				ipa_qmi_ctx->modem_cfg_emb_pipe_flt
 				== false)
 				rc = wwan_add_ul_flt_rule_to_ipa();
 			else
@@ -2731,6 +2737,9 @@ static int __init ipa_wwan_init(void)
 	mutex_init(&ipa_to_apps_pipe_handle_guard);
 	ipa_to_apps_hdl = -1;
 
+	ipa_qmi_init();
+
 	/* Register for Modem SSR */
 	subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
 		&ssr_notifier);
@@ -2743,6 +2752,7 @@
 static void __exit ipa_wwan_cleanup(void)
 {
 	int ret;
+	ipa_qmi_cleanup();
 	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
 	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
 		&ssr_notifier);
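
Beyond the mutex, the diff converts the workqueues_stopped flag from a plain bool to an atomic_t and adds an ipa_qmi_initialized flag, so writers and readers no longer touch a plain shared variable. A minimal sketch of that read/write pattern, again with hypothetical names (stopped, stop_workqueues, may_queue_work):

/*
 * Sketch of the bool-to-atomic_t flag conversion seen above; the
 * real code applies it to workqueues_stopped and ipa_qmi_initialized.
 */
#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t stopped = ATOMIC_INIT(0);

static void stop_workqueues(void)
{
	atomic_set(&stopped, 1);	/* visible to all readers */
}

static bool may_queue_work(void)
{
	return atomic_read(&stopped) == 0;	/* no compiler-cached reads */
}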