Merge "mhi: core: Add support for host triggered device ram dump"
commit 9b5ee09c39

5 changed files with 198 additions and 32 deletions
@@ -249,6 +249,13 @@ int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
 {
        struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
        struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+       struct bhie_mem_info *bhie_mem_info;
+       u32 rx_sequence, val, current_seq;
+       u32 timeout = (bhi_ctxt->poll_timeout * 1000) / BHIE_RDDM_DELAY_TIME_US;
+       int i;
+       u32 cur_exec, prev_exec = 0;
+       u32 state, prev_state = 0;
+       u32 rx_status, prev_status = 0;

        if (!rddm_table->bhie_mem_info) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "RDDM table == NULL\n");
@@ -258,9 +265,93 @@ int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
        if (!in_panic)
                return bhi_rddm_graceful(mhi_dev_ctxt);

-       mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-               "RDDM collection in panic not yet supported\n");
-       return -EINVAL;
+       /*
+        * The code below should only be executed during a kernel panic;
+        * we expect other cores to be shutting down while we're
+        * executing the rddm transfer. After returning from this function,
+        * we expect the device to reset.
+        */
+
+       /* Trigger device into RDDM */
+       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "pm_state:0x%x mhi_state:%s\n",
+               mhi_dev_ctxt->mhi_pm_state,
+               TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+       if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+               mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+                       "Register access not allowed\n");
+               return -EIO;
+       }
+
+       /*
+        * Normally we only set mhi_pm_state after grabbing pm_xfer_lock as a
+        * writer, via mhi_tryset_pm_state. Since we're in a kernel panic, we
+        * set the pm state without grabbing the xfer lock. We set pm_state to
+        * LD as a safety precaution: if another core is in the middle of a
+        * register access, this should deter it. However, there is no
+        * guarantee the change will take effect.
+        */
+       mhi_dev_ctxt->mhi_pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+       /* change should take effect immediately */
+       smp_wmb();
+
+       bhie_mem_info = &rddm_table->
+               bhie_mem_info[rddm_table->segment_count - 1];
+       rx_sequence = rddm_table->sequence++;
+
+       /* program the vector table */
+       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Programming RXVEC table\n");
+       val = HIGH_WORD(bhie_mem_info->phys_addr);
+       mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+                     BHIE_RXVECADDR_HIGH_OFFS, val);
+       val = LOW_WORD(bhie_mem_info->phys_addr);
+       mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECADDR_LOW_OFFS,
+                     val);
+       val = (u32)bhie_mem_info->size;
+       mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECSIZE_OFFS,
+                     val);
+       mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECDB_OFFS,
+                           BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+                           rx_sequence);
+
+       /* trigger device into rddm */
+       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+               "Triggering Device into RDDM mode\n");
+       mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_SYS_ERR);
+       i = 0;
+
+       while (timeout--) {
+               cur_exec = mhi_reg_read(bhi_ctxt->bhi_base, BHI_EXECENV);
+               state = mhi_get_m_state(mhi_dev_ctxt);
+               rx_status = mhi_reg_read(bhi_ctxt->bhi_base,
+                                        BHIE_RXVECSTATUS_OFFS);
+               /* log if reg. values changed or once per second (udelay(1000)) */
+               if (cur_exec != prev_exec || state != prev_state ||
+                   rx_status != prev_status || !(i & (SZ_1K - 1))) {
+                       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+                               "EXECENV:0x%x MHISTATE:0x%x RXSTATUS:0x%x\n",
+                               cur_exec, state, rx_status);
+                       prev_exec = cur_exec;
+                       prev_state = state;
+                       prev_status = rx_status;
+               }
+               current_seq = (rx_status & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
+                       BHIE_TXVECSTATUS_SEQNUM_SHFT;
+               rx_status = (rx_status & BHIE_TXVECSTATUS_STATUS_BMSK) >>
+                       BHIE_TXVECSTATUS_STATUS_SHFT;
+
+               if ((rx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
+                   (current_seq == rx_sequence)) {
+                       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+                               "rddm transfer completed\n");
+                       return 0;
+               }
+               udelay(BHIE_RDDM_DELAY_TIME_US);
+               i++;
+       }
+
+       mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "rddm transfer timeout\n");
+
+       return -EIO;
 }

 static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt,
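For context, this in-panic path is reached through mhi_pm_control_device() with MHI_DEV_CTRL_RDDM_KERNEL_PANIC (added later in this change). Below is a minimal sketch of how a bus-master client could wire that up from a panic notifier. It is illustrative only and not part of this commit; the client_* names, the msm_mhi.h header path, and the panic_notifier_list declaration site are assumptions.

/*
 * Illustrative only -- not part of this commit. Assumes the client API
 * from this series and a kernel where panic_notifier_list is available
 * via <linux/kernel.h> and <linux/notifier.h>.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/msm_mhi.h>      /* assumed header name for the client API */

static struct mhi_device *client_mhi_device;    /* assumed: saved at client probe */

static int client_mhi_panic_cb(struct notifier_block *nb,
                               unsigned long action, void *data)
{
        /*
         * Runs in atomic context during panic, which is why the in-panic
         * RDDM path above polls with udelay() rather than sleeping.
         */
        mhi_pm_control_device(client_mhi_device,
                              MHI_DEV_CTRL_RDDM_KERNEL_PANIC);
        return NOTIFY_DONE;
}

static struct notifier_block client_mhi_panic_nb = {
        .notifier_call = client_mhi_panic_cb,
};

/* registered once, e.g. from the client's probe() */
static void client_register_panic_handler(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &client_mhi_panic_nb);
}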
@@ -439,7 +530,6 @@ void bhi_firmware_download(struct work_struct *work)
        struct bhi_ctxt_t *bhi_ctxt;
        struct bhie_mem_info mem_info;
        int ret;
-       long timeout;

        mhi_dev_ctxt = container_of(work, struct mhi_device_ctxt,
                                    bhi_ctxt.fw_load_work);
@@ -448,7 +538,14 @@ void bhi_firmware_download(struct work_struct *work)
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");

        wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-                                mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+                                mhi_dev_ctxt->mhi_state == MHI_STATE_BHI ||
+                                mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT);
+       if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT ||
+           mhi_dev_ctxt->mhi_state != MHI_STATE_BHI) {
+               mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+                       "MHI is not in valid state for firmware download\n");
+               return;
+       }

        /* PBL image is the first segment in firmware vector table */
        mem_info = *bhi_ctxt->fw_table.bhie_mem_info;
@@ -462,10 +559,12 @@ void bhi_firmware_download(struct work_struct *work)
        mhi_init_state_transition(mhi_dev_ctxt,
                                  STATE_TRANSITION_RESET);

-       timeout = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-                       mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE,
+       wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+                       mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE ||
+                       mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
                        msecs_to_jiffies(bhi_ctxt->poll_timeout));
-       if (!timeout) {
+       if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT ||
+           mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_BHIE) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "Failed to Enter EXEC_ENV_BHIE\n");
                return;
@@ -87,6 +87,7 @@

 #define BHI_POLL_SLEEP_TIME_MS 100
 #define BHI_POLL_TIMEOUT_MS 2000
+#define BHIE_RDDM_DELAY_TIME_US (1000)

 int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
 void bhi_firmware_download(struct work_struct *work);
@@ -22,6 +22,22 @@
 #include "mhi_hwio.h"
 #include "mhi_bhi.h"

+static const char *const mhi_dev_ctrl_str[MHI_DEV_CTRL_MAXCMD] = {
+       [MHI_DEV_CTRL_INIT] = "INIT",
+       [MHI_DEV_CTRL_DE_INIT] = "DE-INIT",
+       [MHI_DEV_CTRL_SUSPEND] = "SUSPEND",
+       [MHI_DEV_CTRL_RESUME] = "RESUME",
+       [MHI_DEV_CTRL_POWER_OFF] = "OFF",
+       [MHI_DEV_CTRL_POWER_ON] = "ON",
+       [MHI_DEV_CTRL_TRIGGER_RDDM] = "TRIGGER RDDM",
+       [MHI_DEV_CTRL_RDDM] = "RDDM",
+       [MHI_DEV_CTRL_RDDM_KERNEL_PANIC] = "RDDM IN PANIC",
+       [MHI_DEV_CTRL_NOTIFY_LINK_ERROR] = "LD",
+};
+
+#define TO_MHI_DEV_CTRL_STR(cmd) ((cmd >= MHI_DEV_CTRL_MAXCMD) ? "INVALID" : \
+                                 mhi_dev_ctrl_str[cmd])
+
 /* Write only sysfs attributes */
 static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
 static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
@@ -98,11 +114,13 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
        read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
        r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
                               mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
+                              mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
                               msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-       if (!r) {
+       if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-                       "Failed to get M0||M1 event, timeout, current state:%s\n",
+                       "Failed to get M0||M1 event or LD pm_state:0x%x state:%s\n",
+                       mhi_dev_ctxt->mhi_pm_state,
                        TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
                return -EIO;
        }
@@ -122,9 +140,10 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
        write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
        r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
-                              mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
+                              mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
                               msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-       if (!r) {
+       if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
                        "Failed to get M3 event, timeout, current state:%s\n",
                        TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -159,11 +178,12 @@ static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
        write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
        r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
                               mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+                              mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
+                              mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
                               msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-       if (!r) {
+       if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
                mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-                       "Failed to get M0 event, timeout\n");
+                       "Failed to get M0 event, timeout or LD\n");
                r = -EIO;
        } else
                r = 0;
@@ -295,13 +315,16 @@ static int mhi_pm_slave_mode_power_on(struct mhi_device_ctxt *mhi_dev_ctxt)
        mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
        read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

-       ret_val = wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
+       wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
                                    msecs_to_jiffies(timeout));
-       if (!ret_val || mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
+       if (mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
                ret_val = -EIO;
        else
                ret_val = 0;

+       /* wait for firmware download to complete */
+       flush_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);
+
        if (ret_val) {
                read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
                mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
@@ -537,16 +560,16 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
        }
 }

-int mhi_pm_control_device(struct mhi_device *mhi_device,
-                         enum mhi_dev_ctrl ctrl)
+int mhi_pm_control_device(struct mhi_device *mhi_device, enum mhi_dev_ctrl ctrl)
 {
        struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;
+       unsigned long flags;

        if (!mhi_dev_ctxt)
                return -EINVAL;

-       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-               "Entered with cmd:%d\n", ctrl);
+       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered with cmd:%s\n",
+               TO_MHI_DEV_CTRL_STR(ctrl));

        switch (ctrl) {
        case MHI_DEV_CTRL_INIT:
@@ -560,12 +583,46 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
        case MHI_DEV_CTRL_POWER_OFF:
                mhi_pm_slave_mode_power_off(mhi_dev_ctxt);
                break;
+       case MHI_DEV_CTRL_TRIGGER_RDDM:
+               write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+               if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+                       write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock,
+                                               flags);
+                       mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+                               "failed to trigger rddm, no register access in state:0x%x\n",
+                               mhi_dev_ctxt->mhi_pm_state);
+                       return -EIO;
+               }
+               mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_SYS_ERR);
+               write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+               break;
        case MHI_DEV_CTRL_RDDM:
                return bhi_rddm(mhi_dev_ctxt, false);
+       case MHI_DEV_CTRL_RDDM_KERNEL_PANIC:
+               return bhi_rddm(mhi_dev_ctxt, true);
        case MHI_DEV_CTRL_DE_INIT:
-               if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+               if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE) {
+                       enum MHI_PM_STATE cur_state;
+                       /*
+                        * If the bus master calls DE_INIT before POWER_OFF, a
+                        * critical failure occurred during the POWER_ON state
+                        * transition and the external PCIe device may not
+                        * respond to the host. Force the PM state to PCIe
+                        * link-down prior to starting the shutdown process to
+                        * avoid accessing the PCIe link.
+                        */
+                       write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+                       cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+                                               MHI_PM_LD_ERR_FATAL_DETECT);
+                       write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+                       if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT)) {
+                               mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+                                       "Failed to transition to state 0x%x from 0x%x\n",
+                                       MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
+                       }
                        process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
                                                   mhi_dev_ctxt);
+               }
                bhi_exit(mhi_dev_ctxt);
                break;
        case MHI_DEV_CTRL_NOTIFY_LINK_ERROR:
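The TRIGGER_RDDM and RDDM commands above are meant to be used together: TRIGGER_RDDM forces the device into SYS_ERR so it drops into the RDDM execution environment, and RDDM then collects the dump over bhi_rddm()'s graceful path. A rough, hypothetical usage sketch for a bus master outside of panic follows; the client_* name and the way RDDM readiness is observed are assumptions, not part of this commit.

/* Hypothetical usage sketch -- not part of this commit. */
static int client_collect_ramdump(struct mhi_device *mhi_device)
{
        int ret;

        /* ask the host driver to move the device into SYS_ERR / RDDM */
        ret = mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_TRIGGER_RDDM);
        if (ret)
                return ret;

        /*
         * Wait for the device to report the RDDM execution environment
         * (for example through the client's status callback), then pull
         * the dump over the graceful, non-panic path.
         */
        return mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_RDDM);
}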
@@ -580,6 +637,12 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
                        mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
                                "Failed to transition to state 0x%x from 0x%x\n",
                                MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
+
+               /* wake up all threads that are waiting for state change events */
+               complete(&mhi_dev_ctxt->cmd_complete);
+               wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+               wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
+               wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
                break;
        }
        default:
@@ -147,7 +147,8 @@ void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
  * M1 -> M3_ENTER --> M3
  * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
  * L2: SHUTDOWN_PROCESS -> DISABLE -> SSR_PENDING (via SSR Notification only)
- * L3: LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
  */
 static const struct mhi_pm_transitions const mhi_state_transitions[] = {
        /* L0 States */
@@ -216,7 +217,7 @@ static const struct mhi_pm_transitions const mhi_state_transitions[] = {
        /* L3 States */
        {
                MHI_PM_LD_ERR_FATAL_DETECT,
-               MHI_PM_SHUTDOWN_PROCESS
+               MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
        },
        /* From SSR notification only */
        {
@@ -160,9 +160,11 @@ enum mhi_dev_ctrl {
        MHI_DEV_CTRL_RESUME,
        MHI_DEV_CTRL_POWER_OFF,
        MHI_DEV_CTRL_POWER_ON,
+       MHI_DEV_CTRL_TRIGGER_RDDM,
        MHI_DEV_CTRL_RDDM,
        MHI_DEV_CTRL_RDDM_KERNEL_PANIC,
        MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
+       MHI_DEV_CTRL_MAXCMD,
 };

 enum mhi_rddm_segment {