msm: mhi: Enable M2 exit through CLKREQ assertion

Allow exit from the M2 power state by asserting the CLKREQ GPIO
when an MHI doorbell write occurs.

CRs-Fixed: 733370
Change-Id: I27b425ee305fc9c044812a8b15c76970987a5dae
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
This commit is contained in:
Andrei Danaila 2015-03-17 15:35:05 -07:00 committed by David Keitel
parent 8d7c29c161
commit c966ff457f
6 changed files with 111 additions and 155 deletions

View file

@ -395,6 +395,7 @@ struct mhi_flags {
atomic_t pending_ssr;
atomic_t pending_powerup;
int stop_threads;
atomic_t device_wake;
u32 ssr;
};
@ -453,8 +454,10 @@ struct mhi_device_ctxt {
u32 outbound_evmod_rate;
struct mhi_counters counters;
struct mhi_flags flags;
u32 device_wake_asserted;
rwlock_t xfer_lock;
atomic_t m2_transition;
struct hrtimer m1_timer;
ktime_t m1_timeout;
ktime_t ul_acc_tmr_timeout;

View file

@ -115,6 +115,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
rwlock_init(&mhi_dev_ctxt->xfer_lock);
mutex_init(&mhi_dev_ctxt->mhi_link_state);
mutex_init(&mhi_dev_ctxt->pm_lock);
atomic_set(&mhi_dev_ctxt->m2_transition, 0);
return MHI_STATUS_SUCCESS;
db_write_lock_free:
@ -368,14 +369,6 @@ static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
/*
 * Set up the M1 inactivity timer for the MHI device context.
 *
 * Arms nothing; it only initializes the hrtimer object, computes the
 * fixed timeout, and installs the expiry callback (mhi_initiate_m1).
 * Always returns MHI_STATUS_SUCCESS.
 *
 * NOTE(review): this body appears in a diff view; the hrtimer setup
 * lines are being removed by the commit shown — confirm against the
 * post-commit tree before relying on this documentation.
 */
static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
/* Relative, monotonic-clock timer (immune to wall-clock jumps) */
hrtimer_init(&mhi_dev_ctxt->m1_timer,
CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
/* Milliseconds -> nanoseconds for ktime_set (1E6L ns per ms) */
mhi_dev_ctxt->m1_timeout =
ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
/* Expiry callback decides whether the device may drop into M1 */
mhi_dev_ctxt->m1_timer.function = mhi_initiate_m1;
mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
"Starting M1 timer\n");
return MHI_STATUS_SUCCESS;
}

View file

@ -15,6 +15,7 @@
#define MHI_IPC_LOG_PAGES (100)
#define MHI_LOG_SIZE 0x1000
#define MHI_LINK_STABILITY_WAIT_MS 100
#define MHI_DEVICE_WAKE_DBOUNCE_TIMEOUT_MS 10
#define MHI_MAX_LINK_RETRIES 9
#define DT_WAIT_RETRIES 30
#define MHI_MAX_SUSPEND_RETRIES 1000
@ -54,10 +55,12 @@
#define MHI_DATA_SEG_WINDOW_START_ADDR 0x0ULL
#define MHI_DATA_SEG_WINDOW_END_ADDR 0x3E800000ULL
#define MHI_M1_ENTRY_DELAY_MS 100
#define MHI_M2_DEBOUNCE_TMR_MS 10
#define MHI_XFER_DB_INTERVAL 8
#define MHI_EV_DB_INTERVAL 32
#define MHI_DEV_WAKE_DB 127
#define MHI_HANDLE_MAGIC 0x12344321
/* PCIe Device Info */

View file

@ -497,6 +497,28 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
chan, db_value);
}
}
/*
 * mhi_check_m2_transition - gate doorbell writes against the M2 state.
 *
 * Called before ringing a channel doorbell. If the device is in M2 and
 * the M2 entry sequence has finished (m2_transition == 0), assert
 * DEVICE_WAKE to pull the device out of M2 and report
 * MHI_STATUS_CHAN_NOT_READY so the caller retries later. If the M2
 * entry is still in flight (m2_transition != 0), also report
 * MHI_STATUS_CHAN_NOT_READY without touching DEVICE_WAKE. In any other
 * MHI state the doorbell may proceed and MHI_STATUS_SUCCESS is
 * returned.
 *
 * Fixes vs. original: removed the redundant trailing else branch that
 * re-assigned ret_val to its initial value, corrected "} else{" brace
 * spacing, and dropped redundant parentheses around atomic_read().
 */
enum MHI_STATUS mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_VERBOSE, "state = %d\n", mhi_dev_ctxt->mhi_state);
	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
		mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
			atomic_read(&mhi_dev_ctxt->m2_transition));
		if (atomic_read(&mhi_dev_ctxt->m2_transition) == 0) {
			/*
			 * M2 fully entered: wake the device (DEVICE_WAKE /
			 * CLKREQ) and have the caller retry the doorbell.
			 */
			if (mhi_dev_ctxt->flags.link_up) {
				mhi_assert_device_wake(mhi_dev_ctxt);
				ret_val = MHI_STATUS_CHAN_NOT_READY;
			}
		} else {
			/* M2 entry still in progress; just defer. */
			mhi_log(MHI_MSG_INFO, "m2_transition flag is set\n");
			ret_val = MHI_STATUS_CHAN_NOT_READY;
		}
	}
	/* Not in M2: ret_val retains its MHI_STATUS_SUCCESS initializer. */
	return ret_val;
}
static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
*mhi_dev_ctxt,
@ -516,11 +538,12 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
"Queued outbound pkt. Pending Acks %d\n",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
}
if (likely((((
(mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
(mhi_dev_ctxt->mhi_state == MHI_STATE_M1)) &&
ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
if (likely(((ret_val == MHI_STATUS_SUCCESS) &&
(((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
(mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
(chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
(!mhi_dev_ctxt->flags.pending_M3)))) {
(!mhi_dev_ctxt->flags.pending_M3))) {
if (likely(type == MHI_RING_TYPE_XFER_RING)) {
spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
flags);
@ -574,14 +597,6 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
chan = client_handle->chan;
pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);
/* Bump up the vote for pending data */
read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
atomic_inc(&mhi_dev_ctxt->flags.data_pending);
if (mhi_dev_ctxt->flags.link_up)
mhi_assert_device_wake(mhi_dev_ctxt);
read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
pkt_loc->type.info = mhi_flags;
@ -602,19 +617,20 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
(void *)&pkt_loc);
if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
mhi_log(MHI_MSG_CRITICAL,
mhi_log(MHI_MSG_VERBOSE,
"Failed to insert trb in xfer ring\n");
goto error;
}
read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
atomic_inc(&mhi_dev_ctxt->flags.data_pending);
ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
if (unlikely(MHI_STATUS_SUCCESS != ret_val))
mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
error:
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->plat_dev->dev);
pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->plat_dev->dev);
return ret_val;
@ -645,18 +661,18 @@ enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->dev_exec_env,
chan, cmd);
mhi_assert_device_wake(mhi_dev_ctxt);
atomic_inc(&mhi_dev_ctxt->flags.data_pending);
pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);
/*
* If there is a cmd pending a device confirmation,
* do not send anymore for this channel
*/
if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
ret_val = MHI_STATUS_CMD_PENDING;
goto error_invalid;
}
atomic_inc(&mhi_dev_ctxt->flags.data_pending);
from_state =
mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;
@ -1341,21 +1357,38 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
/*
 * Assert DEVICE_WAKE to (re)wake the MHI device.
 *
 * When the channel doorbell address is mapped and the link is up, sets
 * the device_wake flag and rings the dedicated wake doorbell
 * (MHI_DEV_WAKE_DB) with value 1; otherwise only logs. Always returns 0.
 *
 * NOTE(review): this body comes from a diff rendering; the
 * gpio_direction_output() line below appears to be the pre-change
 * (GPIO-based) path that the commit replaces with the doorbell write —
 * confirm against the post-commit tree.
 */
int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n",
mhi_dev_ctxt->dev_props->device_wake_gpio);
/* Legacy path: drive the DEVICE_WAKE GPIO high. */
gpio_direction_output(mhi_dev_ctxt->dev_props->device_wake_gpio, 1);
if ((mhi_dev_ctxt->channel_db_addr) &&
(mhi_dev_ctxt->flags.link_up)) {
mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
mhi_dev_ctxt->enable_lpm);
/* Record the wake vote before ringing the doorbell. */
atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
mhi_write_db(mhi_dev_ctxt,
mhi_dev_ctxt->channel_db_addr,
MHI_DEV_WAKE_DB, 1);
mhi_dev_ctxt->device_wake_asserted = 1;
} else {
/* Doorbell unavailable (no mapping or link down): log only. */
mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
}
return 0;
}
/*
 * Release the DEVICE_WAKE vote so the device may enter low power.
 *
 * Only de-asserts when low-power mode is enabled, a wake vote is
 * currently held (flags.device_wake), the doorbell region is mapped,
 * and the link is up; otherwise logs why the de-assert was skipped.
 * Always returns 0.
 *
 * NOTE(review): this body comes from a diff rendering; the
 * "if (mhi_dev_ctxt->enable_lpm) gpio_direction_output(...)" lines
 * below appear to be the pre-change GPIO path the commit removes —
 * confirm against the post-commit tree.
 */
inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n",
mhi_dev_ctxt->dev_props->device_wake_gpio);
/* Legacy path: drive the DEVICE_WAKE GPIO low when LPM allowed. */
if (mhi_dev_ctxt->enable_lpm)
gpio_direction_output(
mhi_dev_ctxt->dev_props->device_wake_gpio, 0);
else
mhi_log(MHI_MSG_VERBOSE, "LPM Enabled\n");
/* De-assert via doorbell only when every precondition holds. */
if ((mhi_dev_ctxt->enable_lpm) &&
(atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
(mhi_dev_ctxt->channel_db_addr != NULL) &&
(mhi_dev_ctxt->flags.link_up)) {
mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
/* Clear the vote before ringing the wake doorbell with 0. */
atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
MHI_DEV_WAKE_DB, 0);
mhi_dev_ctxt->device_wake_asserted = 0;
} else {
/* Skipped: report which precondition failed for debugging. */
mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
mhi_dev_ctxt->enable_lpm,
atomic_read(&mhi_dev_ctxt->flags.device_wake),
mhi_dev_ctxt->flags.link_up);
}
return 0;
}

View file

@ -113,46 +113,6 @@ exit:
return r;
}
/*
 * hrtimer expiry callback: attempt the M0/M1 -> M1 idle transition.
 *
 * Under the xfer_lock write lock, enters M1 and drops DEVICE_WAKE only
 * when all of: no pending data, state is M0 or M1, no M3 transition
 * pending, MHI initialized, and no outbound acks outstanding. The timer
 * re-arms itself (HRTIMER_RESTART) while the state remains M0/M1/READY,
 * and stops otherwise.
 *
 * NOTE(review): this function appears in a diff view and is being
 * removed by the commit shown — confirm against the post-commit tree.
 */
enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer)
{
int ret_val = 0;
unsigned long flags;
ktime_t curr_time, timer_inc;
/* Recover the device context embedding this timer. */
struct mhi_device_ctxt *mhi_dev_ctxt = container_of(timer,
struct mhi_device_ctxt,
m1_timer);
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
/*
 * We will allow M1 if no data is pending, the current
 * state is M0 and no M3 transition is pending
 */
if ((0 == atomic_read(&mhi_dev_ctxt->flags.data_pending)) &&
(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state ||
MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) &&
(0 == mhi_dev_ctxt->flags.pending_M3) &&
mhi_dev_ctxt->flags.mhi_initialized &&
(0 == atomic_read(
&mhi_dev_ctxt->counters.outbound_acks))) {
mhi_dev_ctxt->mhi_state = MHI_STATE_M1;
/* Device idle: release the wake vote so it can sleep. */
ret_val = mhi_deassert_device_wake(mhi_dev_ctxt);
mhi_dev_ctxt->counters.m0_m1++;
if (ret_val)
mhi_log(MHI_MSG_ERROR,
"Could not set DEVICE WAKE GPIO LOW\n");
}
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
/* Re-arm while still in an active/ready state; state is read
 * outside the lock here — benign for a periodic poll. */
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
curr_time = ktime_get();
timer_inc = ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L);
hrtimer_forward(timer, curr_time, timer_inc);
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
}
int mhi_init_pm_sysfs(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &mhi_attribute_group);

View file

@ -122,11 +122,13 @@ static enum MHI_STATUS process_m0_transition(
} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
mhi_log(MHI_MSG_INFO,
"Transitioning from READY.\n");
} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
mhi_log(MHI_MSG_INFO,
"Transitioning from M1.\n");
} else {
mhi_log(MHI_MSG_INFO,
"MHI State %d link state %d. Quitting\n",
mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
goto exit;
}
read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@ -147,20 +149,20 @@ static enum MHI_STATUS process_m0_transition(
"Could not set bus frequency ret: %d\n",
ret_val);
mhi_dev_ctxt->flags.pending_M0 = 0;
wake_up_interruptible(mhi_dev_ctxt->M0_event);
if (ret_val == -ERESTARTSYS)
mhi_log(MHI_MSG_CRITICAL,
"Pending restart detected\n");
ret_val = hrtimer_start(&mhi_dev_ctxt->m1_timer,
mhi_dev_ctxt->m1_timeout,
HRTIMER_MODE_REL);
if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
}
mhi_log(MHI_MSG_VERBOSE, "Starting M1 timer, ret %d\n", ret_val);
exit:
wake_up_interruptible(mhi_dev_ctxt->M0_event);
if (ret_val == -ERESTARTSYS)
mhi_log(MHI_MSG_CRITICAL,
"Pending restart detected\n");
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if ((!mhi_dev_ctxt->flags.pending_M3) &&
(mhi_dev_ctxt->flags.link_up))
mhi_deassert_device_wake(mhi_dev_ctxt);
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
mhi_log(MHI_MSG_INFO, "Exited\n");
return MHI_STATUS_SUCCESS;
}
@ -176,25 +178,10 @@ static enum MHI_STATUS process_m1_transition(
"Processing M1 state transition from state %d\n",
mhi_dev_ctxt->mhi_state);
mhi_dev_ctxt->counters.m0_m1++;
mhi_log(MHI_MSG_VERBOSE,
"Cancelling Inactivity timer\n");
switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
case 0:
mhi_log(MHI_MSG_VERBOSE,
"Timer was not active\n");
break;
case 1:
mhi_log(MHI_MSG_VERBOSE,
"Timer was active\n");
break;
case -1:
mhi_log(MHI_MSG_VERBOSE,
"Timer executing and can't stop\n");
break;
}
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if (!mhi_dev_ctxt->flags.pending_M3) {
mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
atomic_inc(&mhi_dev_ctxt->m2_transition);
mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
mhi_reg_write_field(mhi_dev_ctxt,
@ -206,24 +193,32 @@ static enum MHI_STATUS process_m1_transition(
mhi_dev_ctxt->counters.m1_m2++;
}
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
ret_val =
mhi_set_bus_request(mhi_dev_ctxt,
0);
ret_val = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (ret_val)
mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
mhi_log(MHI_MSG_INFO, "Start Deferred Suspend usage_count: %d\n",
atomic_read(
&mhi_dev_ctxt->dev_info->plat_dev->dev.power.usage_count));
mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
msleep(MHI_M2_DEBOUNCE_TMR_MS);
pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->plat_dev->dev);
r = pm_request_autosuspend(&mhi_dev_ctxt->dev_info->plat_dev->dev);
if (r) {
mhi_log(MHI_MSG_ERROR, "Failed to remove counter ret %d\n", r);
mhi_log(MHI_MSG_ERROR, "Usage counter is %d\n",
atomic_read(
&mhi_dev_ctxt->dev_info->plat_dev->dev.power.usage_count));
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
mhi_dev_ctxt->flags.pending_M3) {
mhi_assert_device_wake(mhi_dev_ctxt);
} else {
pm_runtime_mark_last_busy(
&mhi_dev_ctxt->dev_info->plat_dev->dev);
r = pm_request_autosuspend(
&mhi_dev_ctxt->dev_info->plat_dev->dev);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to remove counter ret %d\n", r);
}
}
atomic_set(&mhi_dev_ctxt->m2_transition, 0);
mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
return MHI_STATUS_SUCCESS;
}
@ -235,19 +230,6 @@ static enum MHI_STATUS process_m3_transition(
unsigned long flags;
mhi_log(MHI_MSG_INFO,
"Processing M3 state transition\n");
switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
case 0:
mhi_log(MHI_MSG_VERBOSE,
"Timer was not active\n");
break;
case 1:
mhi_log(MHI_MSG_VERBOSE,
"Timer was active\n");
break;
case -1:
mhi_log(MHI_MSG_VERBOSE,
"Timer executing and can't stop\n");
}
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
mhi_dev_ctxt->flags.pending_M3 = 0;
@ -284,19 +266,6 @@ static enum MHI_STATUS mhi_process_link_down(
msleep(20);
}
switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
case 0:
mhi_log(MHI_MSG_CRITICAL,
"Timer was not active\n");
break;
case 1:
mhi_log(MHI_MSG_CRITICAL,
"Timer was active\n");
break;
case -1:
mhi_log(MHI_MSG_CRITICAL,
"Timer executing and can't stop\n");
}
r = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (r)
mhi_log(MHI_MSG_INFO,
@ -578,6 +547,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->client_handle_list[i];
mhi_notify_client(client_handle,
MHI_CB_MHI_ENABLED);
mhi_deassert_device_wake(mhi_dev_ctxt);
}
}
break;
@ -897,7 +867,11 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
"Triggering wake out of M2\n");
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_dev_ctxt->flags.pending_M3 = 1;
mhi_assert_device_wake(mhi_dev_ctxt);
if ((atomic_read(&mhi_dev_ctxt->m2_transition)) == 0) {
mhi_log(MHI_MSG_INFO,
"M2_transition not set\n");
mhi_assert_device_wake(mhi_dev_ctxt);
}
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
@ -943,12 +917,6 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
r = -EAGAIN;
goto exit;
}
r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer);
if (r)
mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was active\n");
else
mhi_log(MHI_MSG_INFO,
"Cancelled M1 timer, timer was not active\n");
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if (mhi_dev_ctxt->flags.pending_M0) {
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
@ -974,17 +942,12 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->flags.pending_M3 = 0;
goto exit;
break;
case -ERESTARTSYS:
mhi_log(MHI_MSG_CRITICAL,
"Going Down...\n");
goto exit;
break;
default:
mhi_log(MHI_MSG_INFO,
"M3 completion received\n");
break;
}
mhi_deassert_device_wake(mhi_dev_ctxt);
mhi_turn_off_pcie_link(mhi_dev_ctxt);
r = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (r)
@ -996,6 +959,7 @@ exit:
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
ring_all_chan_dbs(mhi_dev_ctxt);
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
mhi_deassert_device_wake(mhi_dev_ctxt);
}
mhi_dev_ctxt->flags.pending_M3 = 0;
mutex_unlock(&mhi_dev_ctxt->pm_lock);