mhi: core: Preserve DB Mode state during lpm
When exiting low power mode (M3), do not reset the doorbell (DB) mode state of a channel if that channel's "preserve DB state" flag is set.

CRs-Fixed: 1022868
Change-Id: I6557d28afe9d0ac11b76c683ffba76d7d6ffd377
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
This commit is contained in:
parent
8526dcd425
commit
4267834dd4
5 changed files with 51 additions and 29 deletions
|
@ -261,6 +261,12 @@ enum MHI_EVENT_CCS {
|
||||||
MHI_EVENT_CC_BAD_TRE = 0x11,
|
MHI_EVENT_CC_BAD_TRE = 0x11,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/*
 * struct db_mode - per-ring doorbell (DB) mode state.
 *
 * Introduced by this change to replace the global
 * mhi_flags.db_mode[MHI_MAX_CHANNELS] array, so the DB mode flag lives
 * in each channel's struct mhi_ring instead of a side table.
 *
 * NOTE(review): reconstructed from a garbled diff view; u32 is
 * presumably the kernel's fixed-width typedef — confirm against the
 * full tree.
 */
struct db_mode {
	/* if set do not reset DB_Mode during M0 resume */
	u32 preserve_db_state : 1;
	/* 1 = channel is in doorbell mode; cleared when the DB is rung */
	u32 db_mode : 1;
};
|
||||||
|
|
||||||
struct mhi_ring {
|
struct mhi_ring {
|
||||||
void *base;
|
void *base;
|
||||||
void *wp;
|
void *wp;
|
||||||
|
@ -270,6 +276,7 @@ struct mhi_ring {
|
||||||
uintptr_t el_size;
|
uintptr_t el_size;
|
||||||
u32 overwrite_en;
|
u32 overwrite_en;
|
||||||
enum MHI_CHAN_DIR dir;
|
enum MHI_CHAN_DIR dir;
|
||||||
|
struct db_mode db_mode;
|
||||||
};
|
};
|
||||||
|
|
||||||
enum MHI_CMD_STATUS {
|
enum MHI_CMD_STATUS {
|
||||||
|
@ -414,7 +421,6 @@ struct mhi_flags {
|
||||||
u32 ev_thread_stopped;
|
u32 ev_thread_stopped;
|
||||||
u32 st_thread_stopped;
|
u32 st_thread_stopped;
|
||||||
u32 uldl_enabled;
|
u32 uldl_enabled;
|
||||||
u32 db_mode[MHI_MAX_CHANNELS];
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct mhi_wait_queues {
|
struct mhi_wait_queues {
|
||||||
|
@ -577,7 +583,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
|
||||||
enum MHI_CHAN_DIR chan_type,
|
enum MHI_CHAN_DIR chan_type,
|
||||||
u32 event_ring,
|
u32 event_ring,
|
||||||
struct mhi_ring *ring,
|
struct mhi_ring *ring,
|
||||||
enum MHI_CHAN_STATE chan_state);
|
enum MHI_CHAN_STATE chan_state,
|
||||||
|
bool preserve_db_state);
|
||||||
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
|
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
|
||||||
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
|
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
u32 chan);
|
u32 chan);
|
||||||
|
|
|
@ -585,21 +585,23 @@ error_during_props:
|
||||||
/**
|
/**
|
||||||
* @brief Initialize the channel context and shadow context
|
* @brief Initialize the channel context and shadow context
|
||||||
*
|
*
|
||||||
* @cc_list: Context to initialize
|
* @cc_list: Context to initialize
|
||||||
* @trb_list_phy: Physical base address for the TRE ring
|
* @trb_list_phy: Physical base address for the TRE ring
|
||||||
* @trb_list_virt: Virtual base address for the TRE ring
|
* @trb_list_virt: Virtual base address for the TRE ring
|
||||||
* @el_per_ring: Number of TREs this ring will contain
|
* @el_per_ring: Number of TREs this ring will contain
|
||||||
* @chan_type: Type of channel IN/OUT
|
* @chan_type: Type of channel IN/OUT
|
||||||
* @event_ring: Event ring to be mapped to this channel context
|
* @event_ring: Event ring to be mapped to this channel context
|
||||||
* @ring: Shadow context to be initialized alongside
|
* @ring: Shadow context to be initialized alongside
|
||||||
*
|
* @chan_state: Channel state
|
||||||
|
* @preserve_db_state: Do not reset DB state during resume
|
||||||
* @Return errno
|
* @Return errno
|
||||||
*/
|
*/
|
||||||
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
|
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
|
||||||
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
|
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
|
||||||
u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
|
u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
|
||||||
u32 event_ring, struct mhi_ring *ring,
|
u32 event_ring, struct mhi_ring *ring,
|
||||||
enum MHI_CHAN_STATE chan_state)
|
enum MHI_CHAN_STATE chan_state,
|
||||||
|
bool preserve_db_state)
|
||||||
{
|
{
|
||||||
cc_list->mhi_chan_state = chan_state;
|
cc_list->mhi_chan_state = chan_state;
|
||||||
cc_list->mhi_chan_type = chan_type;
|
cc_list->mhi_chan_type = chan_type;
|
||||||
|
@ -617,6 +619,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
|
||||||
ring->el_size = sizeof(struct mhi_tx_pkt);
|
ring->el_size = sizeof(struct mhi_tx_pkt);
|
||||||
ring->overwrite_en = 0;
|
ring->overwrite_en = 0;
|
||||||
ring->dir = chan_type;
|
ring->dir = chan_type;
|
||||||
|
ring->db_mode.db_mode = 1;
|
||||||
|
ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
|
||||||
/* Flush writes to MMIO */
|
/* Flush writes to MMIO */
|
||||||
wmb();
|
wmb();
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -244,6 +244,10 @@
|
||||||
#define MHI_CHAN_TYPE__MASK (3)
|
#define MHI_CHAN_TYPE__MASK (3)
|
||||||
#define MHI_CHAN_TYPE__SHIFT (6)
|
#define MHI_CHAN_TYPE__SHIFT (6)
|
||||||
|
|
||||||
|
#define PRESERVE_DB_STATE
|
||||||
|
#define MHI_PRESERVE_DB_STATE__MASK (1)
|
||||||
|
#define MHI_PRESERVE_DB_STATE__SHIFT (8)
|
||||||
|
|
||||||
#define GET_CHAN_PROPS(_FIELD, _VAL) \
|
#define GET_CHAN_PROPS(_FIELD, _VAL) \
|
||||||
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
|
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
|
||||||
|
|
||||||
|
|
|
@ -212,7 +212,7 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
ring->len, ring->base,
|
ring->len, ring->base,
|
||||||
cc_list->mhi_trb_ring_base_addr);
|
cc_list->mhi_trb_ring_base_addr);
|
||||||
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
|
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
|
||||||
MHI_CHAN_STATE_DISABLED);
|
MHI_CHAN_STATE_DISABLED, false);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -259,7 +259,9 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
|
||||||
client_handle->chan_info.flags),
|
client_handle->chan_info.flags),
|
||||||
client_handle->chan_info.ev_ring,
|
client_handle->chan_info.ev_ring,
|
||||||
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
|
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
|
||||||
MHI_CHAN_STATE_ENABLED);
|
MHI_CHAN_STATE_ENABLED,
|
||||||
|
GET_CHAN_PROPS(PRESERVE_DB_STATE,
|
||||||
|
client_handle->chan_info.flags));
|
||||||
mhi_log(MHI_MSG_INFO, "Exited\n");
|
mhi_log(MHI_MSG_INFO, "Exited\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -654,6 +656,7 @@ static int mhi_queue_dma_xfer(
|
||||||
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
|
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
|
||||||
"Client buffer is of invalid length\n");
|
"Client buffer is of invalid length\n");
|
||||||
chan = client_handle->chan_info.chan_nr;
|
chan = client_handle->chan_info.chan_nr;
|
||||||
|
|
||||||
mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
|
mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
|
||||||
pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
||||||
|
|
||||||
|
@ -698,6 +701,7 @@ static int mhi_queue_dma_xfer(
|
||||||
|
|
||||||
error:
|
error:
|
||||||
pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
||||||
|
|
||||||
mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
|
mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
|
||||||
pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
|
||||||
return ret_val;
|
return ret_val;
|
||||||
|
@ -1169,12 +1173,13 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
|
||||||
|
|
||||||
mhi_dev_ctxt->flags.uldl_enabled = 1;
|
mhi_dev_ctxt->flags.uldl_enabled = 1;
|
||||||
chan = MHI_EV_READ_CHID(EV_CHID, event);
|
chan = MHI_EV_READ_CHID(EV_CHID, event);
|
||||||
mhi_dev_ctxt->flags.db_mode[chan] = 1;
|
|
||||||
chan_ctxt =
|
chan_ctxt =
|
||||||
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
|
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
|
||||||
|
|
||||||
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
|
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
|
||||||
spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
|
spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
|
||||||
flags);
|
flags);
|
||||||
|
chan_ctxt->db_mode.db_mode = 1;
|
||||||
if (chan_ctxt->wp != chan_ctxt->rp) {
|
if (chan_ctxt->wp != chan_ctxt->rp) {
|
||||||
db_value = mhi_v2p_addr(mhi_dev_ctxt,
|
db_value = mhi_v2p_addr(mhi_dev_ctxt,
|
||||||
MHI_RING_TYPE_XFER_RING, chan,
|
MHI_RING_TYPE_XFER_RING, chan,
|
||||||
|
@ -1658,6 +1663,8 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
void __iomem *io_addr,
|
void __iomem *io_addr,
|
||||||
uintptr_t chan, u32 val)
|
uintptr_t chan, u32 val)
|
||||||
{
|
{
|
||||||
|
struct mhi_ring *chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
|
||||||
|
|
||||||
mhi_log(MHI_MSG_VERBOSE,
|
mhi_log(MHI_MSG_VERBOSE,
|
||||||
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
|
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
|
||||||
io_addr, chan, val);
|
io_addr, chan, val);
|
||||||
|
@ -1668,14 +1675,14 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
|
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
|
||||||
if (!(IS_HARDWARE_CHANNEL(chan) &&
|
if (!(IS_HARDWARE_CHANNEL(chan) &&
|
||||||
mhi_dev_ctxt->flags.uldl_enabled &&
|
mhi_dev_ctxt->flags.uldl_enabled &&
|
||||||
!mhi_dev_ctxt->flags.db_mode[chan])) {
|
!chan_ctxt->db_mode.db_mode)) {
|
||||||
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
||||||
mhi_dev_ctxt->flags.db_mode[chan] = 0;
|
chan_ctxt->db_mode.db_mode = 0;
|
||||||
} else {
|
} else {
|
||||||
mhi_log(MHI_MSG_INFO,
|
mhi_log(MHI_MSG_INFO,
|
||||||
"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
|
"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
|
||||||
chan, mhi_dev_ctxt->flags.uldl_enabled,
|
chan, mhi_dev_ctxt->flags.uldl_enabled,
|
||||||
mhi_dev_ctxt->flags.db_mode[chan]);
|
chan_ctxt->db_mode.db_mode);
|
||||||
}
|
}
|
||||||
/* Event Doorbell and Polling mode Disabled */
|
/* Event Doorbell and Polling mode Disabled */
|
||||||
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
|
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
|
||||||
|
@ -1683,11 +1690,9 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
|
if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
|
||||||
!mhi_dev_ctxt->flags.uldl_enabled) {
|
!mhi_dev_ctxt->flags.uldl_enabled) {
|
||||||
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
||||||
mhi_dev_ctxt->flags.db_mode[chan] = 0;
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
|
||||||
mhi_dev_ctxt->flags.db_mode[chan] = 0;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -90,11 +90,13 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
|
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
|
||||||
if (VALID_CHAN_NR(i)) {
|
if (VALID_CHAN_NR(i)) {
|
||||||
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
|
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
|
||||||
if (IS_HARDWARE_CHANNEL(i) && reset_db_mode)
|
|
||||||
mhi_dev_ctxt->flags.db_mode[i] = 1;
|
/* Reset the DB Mode state to DB Mode */
|
||||||
if ((local_ctxt->wp != local_ctxt->rp) ||
|
if (local_ctxt->db_mode.preserve_db_state == 0
|
||||||
((local_ctxt->wp != local_ctxt->rp) &&
|
&& reset_db_mode)
|
||||||
(local_ctxt->dir == MHI_IN)))
|
local_ctxt->db_mode.db_mode = 1;
|
||||||
|
|
||||||
|
if (local_ctxt->wp != local_ctxt->rp)
|
||||||
conditional_chan_db_write(mhi_dev_ctxt, i);
|
conditional_chan_db_write(mhi_dev_ctxt, i);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue