diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 57e58a57fab7..323990a89e4d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -4020,6 +4020,37 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
 	return xprt_ptr;
 }
 
+static struct channel_ctx *get_first_ch_ctx(
+	struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct channel_ctx *ctx;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	if (!list_empty(&xprt_ctx->channels)) {
+		ctx = list_first_entry(&xprt_ctx->channels,
+				struct channel_ctx, port_list_node);
+		rwref_get(&ctx->ch_state_lhb2);
+	} else {
+		ctx = NULL;
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	return ctx;
+}
+
+static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
+	struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
+{
+	unsigned long flags, d_flags;
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+}
+
 /**
  * glink_core_channel_cleanup() - cleanup all channels for the transport
  *
@@ -4030,7 +4061,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
 static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 {
 	unsigned long flags, d_flags;
-	struct channel_ctx *ctx, *tmp_ctx;
+	struct channel_ctx *ctx;
 	struct channel_lcid *temp_lcid, *temp_lcid1;
 	struct glink_core_xprt_ctx *dummy_xprt_ctx;
 
@@ -4039,29 +4070,18 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 		GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
 		return;
 	}
 
-	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
 	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
-	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
-	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-
-	list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
-						port_list_node) {
+	ctx = get_first_ch_ctx(xprt_ptr);
+	while (ctx) {
 		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
 		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
 			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
-			rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
-			list_move_tail(&ctx->port_list_node,
-					&dummy_xprt_ctx->channels);
 			ctx->transport_ptr = dummy_xprt_ctx;
 			rwref_write_put(&ctx->ch_state_lhb2);
+			glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
 		} else {
 			/* local state is in either CLOSED or CLOSING */
-			spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
-					flags);
-			spin_unlock_irqrestore(
-				&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-				d_flags);
 			glink_core_remote_close_common(ctx, true);
 			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
 				glink_core_ch_close_ack_common(ctx, true);
@@ -4069,22 +4089,21 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 			if (ch_is_fully_closed(ctx))
 				glink_delete_ch_from_list(ctx, false);
 			rwref_write_put(&ctx->ch_state_lhb2);
-			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-					d_flags);
-			spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 		}
+		rwref_put(&ctx->ch_state_lhb2);
+		ctx = get_first_ch_ctx(xprt_ptr);
 	}
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 	list_for_each_entry_safe(temp_lcid, temp_lcid1, &xprt_ptr->free_lcid_list,
 						list_node) {
 		list_del(&temp_lcid->list_node);
 		kfree(&temp_lcid->list_node);
 	}
-	dummy_xprt_ctx->dummy_in_use = false;
 	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 	rwref_read_put(&xprt_ptr->xprt_state_lhb0);
 
 	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	dummy_xprt_ctx->dummy_in_use = false;
 	while (!list_empty(&dummy_xprt_ctx->channels)) {
 		ctx = list_first_entry(&dummy_xprt_ctx->channels,
 					struct channel_ctx, port_list_node);