soc: qcom: glink: Refactor rwref lock mechanism

Add an option to rwref locks that allows the lock functions
to spin instead of sleep while acquiring the lock. Change the
completion variable to a waitqueue to provide the sleep functionality.

Change rwref reference calls to the locking functions wherever
the code reads or writes the context state.

CRs-Fixed: 988266
Change-Id: Ib2908b2495b1b01a6a130033143a7da8e5c0c231
Signed-off-by: Chris Lew <clew@codeaurora.org>
Author: Chris Lew <clew@codeaurora.org>, 2016-04-06 15:07:07 -07:00
Committed by: Kyle Yan
parent 4ac9a6bc2a
commit 798f2438dd
2 changed files with 136 additions and 56 deletions
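
The behaviour change is easiest to see outside the kernel. Below is a minimal
user-space model of the reader path after this patch, with pthread primitives
standing in for the kernel spinlock, waitqueue and kref; the rwref_model_*
names are illustrative only and not part of the driver. In atomic context the
acquire drops the lock and retries (spins) instead of sleeping, and the put
path wakes waiters the way wake_up() on count_zero now does.

/*
 * Minimal user-space model of the reader path; illustrative only.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct rwref_model {
	unsigned int read_count;
	unsigned int write_count;
	pthread_mutex_t lock;          /* stands in for spinlock_t lock */
	pthread_cond_t count_zero;     /* stands in for wait_queue_head_t count_zero */
};

static void rwref_model_init(struct rwref_model *l)
{
	l->read_count = 0;
	l->write_count = 0;
	pthread_mutex_init(&l->lock, NULL);
	pthread_cond_init(&l->count_zero, NULL);
}

/* Gain a read reference; spin and retry instead of sleeping when is_atomic. */
static void rwref_model_read_get_atomic(struct rwref_model *l, bool is_atomic)
{
	pthread_mutex_lock(&l->lock);
	while (l->write_count != 0) {
		if (is_atomic) {
			/* atomic context: drop the lock, yield, try again */
			pthread_mutex_unlock(&l->lock);
			sched_yield();
			pthread_mutex_lock(&l->lock);
		} else {
			/* sleeping context: wait until the writer is done */
			pthread_cond_wait(&l->count_zero, &l->lock);
		}
	}
	l->read_count++;
	pthread_mutex_unlock(&l->lock);
}

/* Drop a read reference; wake anyone waiting for the counts to reach zero. */
static void rwref_model_read_put(struct rwref_model *l)
{
	pthread_mutex_lock(&l->lock);
	if (--l->read_count == 0)
		pthread_cond_broadcast(&l->count_zero);
	pthread_mutex_unlock(&l->lock);
}

The writer path in the diff below is symmetric: rwref_write_get_atomic() waits
(or spins) until both read_count and write_count are zero before taking the
write reference.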


@@ -84,6 +84,7 @@ struct glink_qos_priority_bin {
* @tx_wq: workqueue to run @tx_kwork
* @tx_task: handle to the running kthread
* @channels: list of all existing channels on this transport
* @dummy_in_use: True while channels are being migrated to the dummy transport.
* @mtu: MTU supported by this transport.
* @token_count: Number of tokens to be assigned per assignment.
* @curr_qos_rate_kBps: Aggregate of currently supported QoS requests.
@@ -119,6 +120,7 @@ struct glink_core_xprt_ctx {
struct list_head channels;
uint32_t next_lcid;
struct list_head free_lcid_list;
bool dummy_in_use;
uint32_t max_cid;
uint32_t max_iid;
@@ -394,7 +396,7 @@ static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
*if_ptr, uint32_t rcid, bool granted);
static bool glink_core_remote_close_common(struct channel_ctx *ctx);
static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe);
static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
enum glink_link_state link_state);
@@ -467,17 +469,26 @@ EXPORT_SYMBOL(glink_ssr);
* glink_core_ch_close_ack_common() - handles the common operations during
* close ack.
* @ctx: Pointer to channel instance.
* @safe: True if called with the ctx rwref lock already held.
*
* Return: True if the channel is fully closed after the state change,
* false otherwise.
*/
static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx, bool safe)
{
bool is_fully_closed;
if (ctx == NULL)
return false;
is_fully_closed = ch_update_local_state(ctx, GLINK_CHANNEL_CLOSED);
if (safe) {
ctx->local_open_state = GLINK_CHANNEL_CLOSED;
is_fully_closed = ch_is_fully_closed(ctx);
} else {
is_fully_closed = ch_update_local_state(ctx,
GLINK_CHANNEL_CLOSED);
}
GLINK_INFO_PERF_CH(ctx,
"%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
__func__);
@@ -498,17 +509,23 @@ static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
* glink_core_remote_close_common() - Handles the common operations during
* a remote close.
* @ctx: Pointer to channel instance.
* @safe: True if called with the ctx rwref lock already held.
*
* Return: True if the channel is fully closed after the state change,
* false otherwise.
*/
static bool glink_core_remote_close_common(struct channel_ctx *ctx)
static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe)
{
bool is_fully_closed;
if (ctx == NULL)
return false;
is_fully_closed = ch_update_rmt_state(ctx, false);
if (safe) {
ctx->remote_opened = false;
is_fully_closed = ch_is_fully_closed(ctx);
} else {
is_fully_closed = ch_update_rmt_state(ctx, false);
}
ctx->rcid = 0;
if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
@@ -2509,14 +2526,20 @@ EXPORT_SYMBOL(glink_get_channel_name_for_handle);
* information associated with it. It also adds the channel lcid to the free
* lcid list, except when the channel is deleted in the ssr/unregister case.
* It can only be called when the channel is fully closed.
*
* Return: true when transport_ptr->channels is empty.
*/
static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
static bool glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
{
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
flags);
if (!list_empty(&ctx->port_list_node))
list_del_init(&ctx->port_list_node);
if (list_empty(&ctx->transport_ptr->channels))
ret = true;
spin_unlock_irqrestore(
&ctx->transport_ptr->xprt_ctx_lock_lhb1,
flags);
@@ -2526,6 +2549,7 @@ static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
rwref_put(&ctx->ch_state_lhb2);
return ret;
}
/**
@@ -2545,6 +2569,7 @@ int glink_close(void *handle)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
int ret;
unsigned long flags;
bool is_empty = false;
if (!ctx)
return -EINVAL;
@@ -2558,6 +2583,16 @@ int glink_close(void *handle)
return -EBUSY;
}
rwref_get(&ctx->ch_state_lhb2);
relock: xprt_ctx = ctx->transport_ptr;
rwref_read_get(&xprt_ctx->xprt_state_lhb0);
rwref_write_get(&ctx->ch_state_lhb2);
if (xprt_ctx != ctx->transport_ptr) {
rwref_write_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
goto relock;
}
/* Set the channel state before removing it from xprt's list(s) */
GLINK_INFO_PERF_CH(ctx,
"%s: local:%u->GLINK_CHANNEL_CLOSING\n",
@@ -2566,33 +2601,29 @@ int glink_close(void *handle)
ctx->pending_delete = true;
ctx->int_req_ack = false;
complete_all(&ctx->int_req_ack_complete);
complete_all(&ctx->int_req_complete);
spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3, flags);
if (!list_empty(&ctx->tx_ready_list_node))
list_del_init(&ctx->tx_ready_list_node);
spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3, flags);
if (ctx->transport_ptr->local_state != GLINK_XPRT_DOWN) {
if (xprt_ctx->local_state != GLINK_XPRT_DOWN) {
glink_qos_reset_priority(ctx);
ret = ctx->transport_ptr->ops->tx_cmd_ch_close(
ctx->transport_ptr->ops,
ctx->lcid);
} else if (!strcmp(ctx->transport_ptr->name, "dummy")) {
ret = xprt_ctx->ops->tx_cmd_ch_close(xprt_ctx->ops, ctx->lcid);
rwref_write_put(&ctx->ch_state_lhb2);
} else if (!strcmp(xprt_ctx->name, "dummy")) {
/*
* This check avoids a race condition when clients call
* glink_close before the dummy xprt swapping happens in the
* link down scenario.
*/
ret = 0;
xprt_ctx = ctx->transport_ptr;
rwref_write_get(&xprt_ctx->xprt_state_lhb0);
glink_core_ch_close_ack_common(ctx);
rwref_write_put(&ctx->ch_state_lhb2);
glink_core_ch_close_ack_common(ctx, false);
if (ch_is_fully_closed(ctx)) {
glink_delete_ch_from_list(ctx, false);
is_empty = glink_delete_ch_from_list(ctx, false);
rwref_put(&xprt_ctx->xprt_state_lhb0);
if (list_empty(&xprt_ctx->channels))
if (is_empty && !xprt_ctx->dummy_in_use)
/* For the xprt reference */
rwref_put(&xprt_ctx->xprt_state_lhb0);
} else {
@@ -2600,9 +2631,12 @@ int glink_close(void *handle)
"channel Not closed yet local state [%d] remote_state [%d]\n",
ctx->local_open_state, ctx->remote_opened);
}
rwref_write_put(&xprt_ctx->xprt_state_lhb0);
}
complete_all(&ctx->int_req_ack_complete);
complete_all(&ctx->int_req_complete);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
return ret;
}
EXPORT_SYMBOL(glink_close);
@@ -2663,25 +2697,25 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!ctx)
return -EINVAL;
rwref_get(&ctx->ch_state_lhb2);
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
if (!(vbuf_provider || pbuf_provider)) {
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EINVAL;
}
if (!ch_is_fully_opened(ctx)) {
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EBUSY;
}
if (size > GLINK_MAX_PKT_SIZE) {
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EINVAL;
}
if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EOPNOTSUPP;
}
tracer_pkt_log_event(data, GLINK_CORE_TX);
@@ -2694,7 +2728,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: R[%u]:%zu Intent not present for lcid\n",
__func__, riid, size);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EAGAIN;
}
if (is_atomic && !(ctx->transport_ptr->capabilities &
@@ -2702,7 +2736,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Cannot request intent in atomic context\n",
__func__);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -EINVAL;
}
@@ -2713,12 +2747,14 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (ret) {
GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
__func__, ret);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return ret;
}
while (ch_pop_remote_rx_intent(ctx, size, &riid,
&intent_size)) {
rwref_get(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
if (is_atomic) {
GLINK_ERR_CH(ctx,
"%s Intent of size %zu not ready\n",
@@ -2768,6 +2804,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
}
reinit_completion(&ctx->int_req_complete);
rwref_read_get(&ctx->ch_state_lhb2);
rwref_put(&ctx->ch_state_lhb2);
}
}
@@ -2787,7 +2825,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!tx_info) {
GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
ch_push_remote_rx_intent(ctx, intent_size, riid);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return -ENOMEM;
}
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
@@ -2813,7 +2851,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
return ret;
}
@@ -3858,6 +3896,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
xprt_ptr->local_state = GLINK_XPRT_DOWN;
xprt_ptr->remote_neg_completed = false;
INIT_LIST_HEAD(&xprt_ptr->channels);
xprt_ptr->dummy_in_use = true;
spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
return xprt_ptr;
@@ -3883,41 +3922,49 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
return;
}
rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
port_list_node) {
rwref_get(&ctx->ch_state_lhb2);
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
d_flags);
list_move_tail(&ctx->port_list_node,
&dummy_xprt_ctx->channels);
spin_unlock_irqrestore(
&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
ctx->transport_ptr = dummy_xprt_ctx;
rwref_write_put(&ctx->ch_state_lhb2);
} else {
/* local state is either CLOSED or CLOSING */
spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
flags);
glink_core_remote_close_common(ctx);
spin_unlock_irqrestore(
&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
d_flags);
glink_core_remote_close_common(ctx, true);
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
glink_core_ch_close_ack_common(ctx);
glink_core_ch_close_ack_common(ctx, true);
/* Channel should be fully closed now. Delete here */
if (ch_is_fully_closed(ctx))
glink_delete_ch_from_list(ctx, false);
rwref_write_put(&ctx->ch_state_lhb2);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
d_flags);
spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
}
rwref_put(&ctx->ch_state_lhb2);
}
list_for_each_entry_safe(temp_lcid, temp_lcid1,
&xprt_ptr->free_lcid_list, list_node) {
list_del(&temp_lcid->list_node);
kfree(&temp_lcid->list_node);
}
dummy_xprt_ctx->dummy_in_use = false;
spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
rwref_read_put(&xprt_ptr->xprt_state_lhb0);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
list_for_each_entry_safe(ctx, tmp_ctx, &dummy_xprt_ctx->channels,
@@ -3925,13 +3972,13 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
rwref_get(&ctx->ch_state_lhb2);
spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
d_flags);
glink_core_remote_close_common(ctx);
glink_core_remote_close_common(ctx, false);
spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
d_flags);
rwref_put(&ctx->ch_state_lhb2);
}
spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
rwref_put(&dummy_xprt_ctx->xprt_state_lhb0);
rwref_read_put(&dummy_xprt_ctx->xprt_state_lhb0);
}
/**
* glink_core_rx_cmd_version() - receive version/features from remote system
@@ -4596,7 +4643,7 @@ static void glink_core_rx_cmd_ch_remote_close(
}
GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
is_ch_fully_closed = glink_core_remote_close_common(ctx);
is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
ctx->pending_delete = true;
if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
@@ -4638,7 +4685,7 @@ static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
return;
}
is_ch_fully_closed = glink_core_ch_close_ack_common(ctx);
is_ch_fully_closed = glink_core_ch_close_ack_common(ctx, false);
if (is_ch_fully_closed) {
glink_delete_ch_from_list(ctx, true);
flush_kthread_worker(&xprt_ptr->tx_wq);
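
The glink_close() hunks above also introduce a relock pattern: take the
transport's state lock, then the channel's state lock, then re-check
ctx->transport_ptr in case the channel was migrated to the dummy transport
while the locks were being acquired, and retry if it changed. A user-space
sketch of that revalidation loop, with illustrative names and pthread mutexes
standing in for the rwref locks:

#include <pthread.h>
#include <stdatomic.h>

struct xprt_model {
	pthread_mutex_t state_lock;              /* stands in for xprt_state_lhb0 */
};

struct channel_model {
	pthread_mutex_t state_lock;              /* stands in for ch_state_lhb2 */
	struct xprt_model * _Atomic transport;   /* swapped only under state_lock */
};

/* Lock the channel together with whatever transport currently owns it. */
static struct xprt_model *lock_channel_and_xprt(struct channel_model *ch)
{
	struct xprt_model *xprt;

	for (;;) {
		xprt = ch->transport;            /* unlocked snapshot */
		pthread_mutex_lock(&xprt->state_lock);
		pthread_mutex_lock(&ch->state_lock);
		if (ch->transport == xprt)
			return xprt;             /* both locks held */
		/* the channel migrated while we were locking: drop both, retry */
		pthread_mutex_unlock(&ch->state_lock);
		pthread_mutex_unlock(&xprt->state_lock);
	}
}

Because the migration path only swaps the transport pointer while holding the
channel's lock, the re-check under both locks is stable.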


@@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -889,7 +889,7 @@ struct rwref_lock {
unsigned read_count;
unsigned write_count;
spinlock_t lock;
struct completion count_zero;
wait_queue_head_t count_zero;
void (*release)(struct rwref_lock *);
};
@@ -923,7 +923,7 @@ static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
lock_ptr->read_count = 0;
lock_ptr->write_count = 0;
spin_lock_init(&lock_ptr->lock);
init_completion(&lock_ptr->count_zero);
init_waitqueue_head(&lock_ptr->count_zero);
lock_ptr->release = release;
}
@@ -952,12 +952,14 @@ static inline void rwref_put(struct rwref_lock *lock_ptr)
}
/**
* rwref_read_get() - gains a reference count for a read operation
* rwref_read_get_atomic() - gains a reference count for a read operation
* lock_ptr: pointer to lock structure
* is_atomic: if true, do not wait when acquiring the lock
*
* Multiple readers may acquire the lock as long as the write count is zero.
*/
static inline void rwref_read_get(struct rwref_lock *lock_ptr)
static inline void rwref_read_get_atomic(struct rwref_lock *lock_ptr,
bool is_atomic)
{
unsigned long flags;
@@ -972,10 +974,24 @@ static inline void rwref_read_get(struct rwref_lock *lock_ptr)
break;
}
spin_unlock_irqrestore(&lock_ptr->lock, flags);
wait_for_completion(&lock_ptr->count_zero);
if (!is_atomic) {
wait_event(lock_ptr->count_zero,
lock_ptr->write_count == 0);
}
}
}
/**
* rwref_read_get() - gains a reference count for a read operation
* lock_ptr: pointer to lock structure
*
* Multiple readers may acquire the lock as long as the write count is zero.
*/
static inline void rwref_read_get(struct rwref_lock *lock_ptr)
{
rwref_read_get_atomic(lock_ptr, false);
}
/**
* rwref_read_put() - returns a reference count for a read operation
* lock_ptr: pointer to lock structure
@@ -991,18 +1007,20 @@ static inline void rwref_read_put(struct rwref_lock *lock_ptr)
spin_lock_irqsave(&lock_ptr->lock, flags);
BUG_ON(lock_ptr->read_count == 0);
if (--lock_ptr->read_count == 0)
complete(&lock_ptr->count_zero);
wake_up(&lock_ptr->count_zero);
spin_unlock_irqrestore(&lock_ptr->lock, flags);
kref_put(&lock_ptr->kref, rwref_lock_release);
}
/**
* rwref_write_get() - gains a reference count for a write operation
* rwref_write_get_atomic() - gains a reference count for a write operation
* lock_ptr: pointer to lock structure
* is_atomic: if true, do not wait when acquiring the lock
*
* Only one writer may acquire the lock as long as the reader count is zero.
*/
static inline void rwref_write_get(struct rwref_lock *lock_ptr)
static inline void rwref_write_get_atomic(struct rwref_lock *lock_ptr,
bool is_atomic)
{
unsigned long flags;
@@ -1017,10 +1035,25 @@ static inline void rwref_write_get(struct rwref_lock *lock_ptr)
break;
}
spin_unlock_irqrestore(&lock_ptr->lock, flags);
wait_for_completion(&lock_ptr->count_zero);
if (!is_atomic) {
wait_event(lock_ptr->count_zero,
(lock_ptr->read_count == 0 &&
lock_ptr->write_count == 0));
}
}
}
/**
* rwref_write_get() - gains a reference count for a write operation
* lock_ptr: pointer to lock structure
*
* Only one writer may acquire the lock as long as the reader count is zero.
*/
static inline void rwref_write_get(struct rwref_lock *lock_ptr)
{
rwref_write_get_atomic(lock_ptr, false);
}
/**
* rwref_write_put() - returns a reference count for a write operation
* lock_ptr: pointer to lock structure
@@ -1036,7 +1069,7 @@ static inline void rwref_write_put(struct rwref_lock *lock_ptr)
spin_lock_irqsave(&lock_ptr->lock, flags);
BUG_ON(lock_ptr->write_count != 1);
if (--lock_ptr->write_count == 0)
complete(&lock_ptr->count_zero);
wake_up(&lock_ptr->count_zero);
spin_unlock_irqrestore(&lock_ptr->lock, flags);
kref_put(&lock_ptr->kref, rwref_lock_release);
}
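
For callers, the intended pattern is the one glink_tx_common() follows in the
first file: take the channel's read reference with the atomic variant so that
atomic contexts spin rather than sleep, and drop it before any blocking wait.
A condensed, illustrative sketch (it assumes the declarations from the files
in this commit and is not a complete transmit path):

static int example_tx_path(struct channel_ctx *ctx, bool is_atomic)
{
	/* spins rather than sleeps when is_atomic is true */
	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);

	if (!ch_is_fully_opened(ctx)) {
		rwref_read_put(&ctx->ch_state_lhb2);
		return -EBUSY;
	}

	/* ... queue the packet while holding the read reference ... */

	rwref_read_put(&ctx->ch_state_lhb2);
	return 0;
}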