soc: qcom: hab: fix the soft lockup in vchan free schedule

With a certain probability, a soft lockup occurs during hab vchan free
schedule: one vchan performs a local hab close while another vchan in the
same context, running in softirq context, tries to acquire the same write
lock in the free schedule path at the same time, which causes a watchdog
bite. Disabling local softirqs while holding the lock avoids this race
between tasklet and process context.

Change-Id: I4ee9b980dab7ecb1986af1d61f70157fc30d1048
Signed-off-by: Yao Jiang <yaojia@codeaurora.org>
commit 4873e89e68 (parent 070bf44aba)
Author: Yao Jiang, 2018-09-27 09:43:32 +08:00; committed via Gerrit

2 files changed, 10 insertions(+), 10 deletions(-)
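
For illustration, here is a minimal, hypothetical sketch (not part of this patch; all demo_* names are invented) of the pattern the fix relies on: when a list is guarded by an rwlock that is taken from both process context and a tasklet, the process-context side must use the _bh lock variants. Plain write_lock() is sufficient against other CPUs, but a tasklet raised on the same CPU can interrupt the lock holder and then spin on the lock forever, which is the watchdog bite described above.

/* demo.c - hypothetical illustration; pre-5.9 tasklet API to match the era */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_RWLOCK(demo_lock);
static LIST_HEAD(demo_list);

struct demo_node {
    struct list_head node;
};

/* Softirq side: the tasklet also writes the list under demo_lock. */
static void demo_tasklet_fn(unsigned long data)
{
    struct demo_node *n, *tmp;

    write_lock(&demo_lock);    /* softirq context: plain variant is fine */
    list_for_each_entry_safe(n, tmp, &demo_list, node) {
        list_del(&n->node);
        kfree(n);
    }
    write_unlock(&demo_lock);
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

/*
 * Process-context side: the free/cleanup path. write_lock_bh() disables
 * local softirqs for the critical section, so the tasklet cannot interrupt
 * this CPU while demo_lock is held and then spin on it.
 */
static void demo_cleanup(void)
{
    struct demo_node *n, *tmp;

    write_lock_bh(&demo_lock);
    list_for_each_entry_safe(n, tmp, &demo_list, node) {
        list_del(&n->node);
        kfree(n);
    }
    write_unlock_bh(&demo_lock);
}

static int __init demo_init(void)
{
    tasklet_schedule(&demo_tasklet);
    demo_cleanup();
    return 0;
}

static void __exit demo_exit(void)
{
    tasklet_kill(&demo_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note that the _bh variants only disable bottom halves on the local CPU; mutual exclusion across CPUs still comes from the rwlock itself, which is why the tasklet side can keep using plain write_lock()/write_unlock().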

Changed file 1 of 2:

@@ -117,7 +117,7 @@ void hab_ctx_free(struct kref *ref)
     struct export_desc *exp, *exp_tmp;
 
     /* garbage-collect exp/imp buffers */
-    write_lock(&ctx->exp_lock);
+    write_lock_bh(&ctx->exp_lock);
     list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
         list_del(&exp->node);
         pr_debug("potential leak exp %d vcid %X recovered\n",
@@ -125,7 +125,7 @@ void hab_ctx_free(struct kref *ref)
         habmem_hyp_revoke(exp->payload, exp->payload_count);
         habmem_remove_export(exp);
     }
-    write_unlock(&ctx->exp_lock);
+    write_unlock_bh(&ctx->exp_lock);
 
     spin_lock_bh(&ctx->imp_lock);
     list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
@@ -159,27 +159,27 @@ void hab_ctx_free(struct kref *ref)
         ctx->kernel, ctx->closing, ctx->owner);
 
     /* check vchans in this ctx */
-    write_lock(&ctx->ctx_lock);
+    write_lock_bh(&ctx->ctx_lock);
     list_for_each_entry(vchan, &ctx->vchannels, node) {
         pr_warn("leak vchan id %X cnt %X remote %d in ctx\n",
             vchan->id, get_refcnt(vchan->refcount),
             vchan->otherend_id);
     }
-    write_unlock(&ctx->ctx_lock);
+    write_unlock_bh(&ctx->ctx_lock);
 
     /* check pending open */
     if (ctx->pending_cnt)
         pr_warn("potential leak of pendin_open nodes %d\n",
             ctx->pending_cnt);
 
-    write_lock(&ctx->ctx_lock);
+    write_lock_bh(&ctx->ctx_lock);
     list_for_each_entry(node, &ctx->pending_open, node) {
         pr_warn("leak pending open vcid %X type %d subid %d openid %d\n",
             node->request.xdata.vchan_id, node->request.type,
             node->request.xdata.sub_id,
             node->request.xdata.open_id);
     }
-    write_unlock(&ctx->ctx_lock);
+    write_unlock_bh(&ctx->ctx_lock);
 
     /* check vchans belong to this ctx in all hab/mmid devices */
     for (i = 0; i < hab_driver.ndevices; i++) {

Changed file 2 of 2:

@@ -90,7 +90,7 @@ hab_vchan_free(struct kref *ref)
     vchan->ctx = NULL;
 
     /* release vchan from pchan. no more msg for this vchan */
-    write_lock(&pchan->vchans_lock);
+    write_lock_bh(&pchan->vchans_lock);
     list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
         if (vchan == vc) {
             list_del(&vc->pnode);
@@ -99,7 +99,7 @@ hab_vchan_free(struct kref *ref)
             break;
         }
     }
-    write_unlock(&pchan->vchans_lock);
+    write_unlock_bh(&pchan->vchans_lock);
 
     /* release idr at the last so same idr will not be used early */
     spin_lock_bh(&pchan->vid_lock);
@@ -262,7 +262,7 @@ static void hab_vchan_schedule_free(struct kref *ref)
      * similar logic is in ctx free. if ctx free runs first,
      * this is skipped
      */
-    write_lock(&ctx->ctx_lock);
+    write_lock_bh(&ctx->ctx_lock);
     list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
         if (vchan == vchanin) {
             pr_debug("vchan free refcnt = %d\n",
@@ -273,7 +273,7 @@ static void hab_vchan_schedule_free(struct kref *ref)
             break;
         }
     }
-    write_unlock(&ctx->ctx_lock);
+    write_unlock_bh(&ctx->ctx_lock);
 
     if (bnotify)
         hab_vchan_stop_notify(vchan);