diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index fdff6f2140a3..5eaf2db32d21 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -116,6 +116,7 @@ static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
 static bool standalone;
 static int probe_status = -EPROBE_DEFER;
 static int msm_rpm_read_smd_data(char *buf);
+static void msm_rpm_process_ack(uint32_t msg_id, int errno);
 
 int msm_rpm_register_notifier(struct notifier_block *nb)
 {
@@ -615,6 +616,7 @@ struct msm_rpm_wait_data {
 	bool ack_recd;
 	int errno;
 	struct completion ack;
+	bool delete_on_ack;
 };
 DEFINE_SPINLOCK(msm_rpm_list_lock);
 
@@ -793,23 +795,45 @@ static int msm_rpm_read_sleep_ack(void)
 {
 	int ret;
 	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+	uint32_t msg_id;
 
 	if (glink_enabled)
 		ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
 	else {
 		ret = msm_rpm_read_smd_data(buf);
-		if (!ret)
+		if (!ret) {
+			/*
+			 * Mimic Glink behavior to ensure that the
+			 * data is read and the msg is removed from
+			 * the wait list. We should have gotten here
+			 * only when there are no drivers waiting on
+			 * ACKs. msm_rpm_get_entry_from_msg_id()
+			 * return non-NULL only then.
+			 */
+			msg_id = msm_rpm_get_msg_id_from_ack(buf);
+			msm_rpm_process_ack(msg_id, 0);
 			ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
+		}
 	}
 	return ret;
 }
 
+static void msm_rpm_flush_noack_messages(void)
+{
+	while (!list_empty(&msm_rpm_wait_list)) {
+		if (!msm_rpm_read_sleep_ack())
+			break;
+	}
+}
+
 static int msm_rpm_flush_requests(bool print)
 {
 	struct rb_node *t;
 	int ret;
 	int count = 0;
 
+	msm_rpm_flush_noack_messages();
+
 	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
 
 		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
@@ -1078,14 +1102,18 @@ static void msm_rpm_notify(void *data, unsigned event)
 
 bool msm_rpm_waiting_for_ack(void)
 {
-	bool ret;
+	bool ret = false;
 	unsigned long flags;
+	struct msm_rpm_wait_data *elem = NULL;
 
 	spin_lock_irqsave(&msm_rpm_list_lock, flags);
-	ret = list_empty(&msm_rpm_wait_list);
+	elem = list_first_entry_or_null(&msm_rpm_wait_list,
+			struct msm_rpm_wait_data, list);
+	if (elem)
+		ret = !elem->delete_on_ack;
 	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
 
-	return !ret;
+	return ret;
 }
 
 static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
@@ -1124,7 +1152,7 @@ static uint32_t msm_rpm_get_next_msg_id(void)
 	return id;
 }
 
-static int msm_rpm_add_wait_list(uint32_t msg_id)
+static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
 {
 	unsigned long flags;
 	struct msm_rpm_wait_data *data =
@@ -1137,8 +1165,12 @@ static int msm_rpm_add_wait_list(uint32_t msg_id)
 	data->ack_recd = false;
 	data->msg_id = msg_id;
 	data->errno = INIT_ERROR;
+	data->delete_on_ack = delete_on_ack;
 	spin_lock_irqsave(&msm_rpm_list_lock, flags);
-	list_add(&data->list, &msm_rpm_wait_list);
+	if (delete_on_ack)
+		list_add_tail(&data->list, &msm_rpm_wait_list);
+	else
+		list_add(&data->list, &msm_rpm_wait_list);
 	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
 
 	return 0;
@@ -1156,21 +1188,24 @@ static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
 
 static void msm_rpm_process_ack(uint32_t msg_id, int errno)
 {
-	struct list_head *ptr;
+	struct list_head *ptr, *next;
 	struct msm_rpm_wait_data *elem = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&msm_rpm_list_lock, flags);
-	list_for_each(ptr, &msm_rpm_wait_list) {
+	list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
 		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
-		if (elem && (elem->msg_id == msg_id)) {
+		if (elem->msg_id == msg_id) {
 			elem->errno = errno;
 			elem->ack_recd = true;
 			complete(&elem->ack);
+			if (elem->delete_on_ack) {
+				list_del(&elem->list);
+				kfree(elem);
+			}
 			break;
 		}
-		elem = NULL;
 	}
 	/* Special case where the sleep driver doesn't
 	 * wait for ACKs. This would decrease the latency involved with
@@ -1533,8 +1568,7 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata,
 		return ret;
 	}
 
-	if (!noack)
-		msm_rpm_add_wait_list(msg_id);
+	msm_rpm_add_wait_list(msg_id, noack);
 
 	ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
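
For reviewers, the sketch below restates the delete-on-ack pattern this patch introduces, outside the driver: entries that no caller waits on are queued at the tail with delete_on_ack set and are reaped directly in the ACK path, so no later caller has to wait on or free them. It is a minimal illustration only; the names demo_wait_data, demo_add_wait and demo_process_ack are hypothetical and not part of rpm-smd.c.

/*
 * Illustrative sketch (hypothetical names, not part of the patch):
 * delete-on-ack entries go to the tail and are freed in the ACK path.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/types.h>

struct demo_wait_data {
	struct list_head list;
	u32 msg_id;
	bool delete_on_ack;
	struct completion ack;
};

static LIST_HEAD(demo_wait_list);
static DEFINE_SPINLOCK(demo_list_lock);

/* Queue a request; no-ack requests go to the tail so waiters stay in front. */
static int demo_add_wait(u32 msg_id, bool delete_on_ack)
{
	struct demo_wait_data *d = kzalloc(sizeof(*d), GFP_ATOMIC);
	unsigned long flags;

	if (!d)
		return -ENOMEM;

	d->msg_id = msg_id;
	d->delete_on_ack = delete_on_ack;
	init_completion(&d->ack);

	spin_lock_irqsave(&demo_list_lock, flags);
	if (delete_on_ack)
		list_add_tail(&d->list, &demo_wait_list);
	else
		list_add(&d->list, &demo_wait_list);
	spin_unlock_irqrestore(&demo_list_lock, flags);

	return 0;
}

/* ACK path: complete any waiter, and free delete-on-ack entries in place. */
static void demo_process_ack(u32 msg_id)
{
	struct demo_wait_data *d, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&demo_list_lock, flags);
	list_for_each_entry_safe(d, tmp, &demo_wait_list, list) {
		if (d->msg_id != msg_id)
			continue;
		complete(&d->ack);
		if (d->delete_on_ack) {
			list_del(&d->list);
			kfree(d);
		}
		break;
	}
	spin_unlock_irqrestore(&demo_list_lock, flags);
}

Tail insertion for delete-on-ack entries keeps any entry a caller is actually sleeping on at the head of the list, which is what the reworked msm_rpm_waiting_for_ack() relies on when it inspects only the first element.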