Merge "mhi: core: Add support for new MHI hardware channel"

Authored by Linux Build Service Account on 2017-02-23 21:35:29 -08:00; committed by Gerrit - the friendly Code Review server
commit e042032335
14 changed files with 236 additions and 146 deletions

View file

@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
netif_stop_queue(dev);
rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
write_unlock_irqrestore(
&rmnet_mhi_ptr->out_chan_full_lock,
flags);
goto rmnet_mhi_xmit_error_cleanup;
} else {
retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
rmnet_mhi_xmit_error_cleanup:
rmnet_log(MSG_VERBOSE, "Ring full\n");
write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
return NETDEV_TX_BUSY;
}
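These two hunks rework the ring-full path so the out_chan_full_lock is released exactly once before returning NETDEV_TX_BUSY. A minimal sketch of the underlying ndo_start_xmit ring-full pattern, with hypothetical names (example_priv, example_ring_full) standing in for the driver's own:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */
	unsigned long flags;

	write_lock_irqsave(&priv->out_chan_full_lock, flags);
	if (example_ring_full(priv)) {			/* hypothetical */
		netif_stop_queue(dev);	/* no more skbs until space frees up */
		write_unlock_irqrestore(&priv->out_chan_full_lock, flags);
		return NETDEV_TX_BUSY;	/* stack requeues this skb */
	}
	/* ... queue the skb ... */
	write_unlock_irqrestore(&priv->out_chan_full_lock, flags);
	return NETDEV_TX_OK;
}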

View file

@@ -261,6 +261,12 @@ enum MHI_EVENT_CCS {
MHI_EVENT_CC_BAD_TRE = 0x11,
};
struct db_mode {
/* if set do not reset DB_Mode during M0 resume */
u32 preserve_db_state : 1;
u32 db_mode : 1;
};
struct mhi_ring {
void *base;
void *wp;
@@ -270,6 +276,9 @@ struct mhi_ring {
uintptr_t el_size;
u32 overwrite_en;
enum MHI_CHAN_DIR dir;
struct db_mode db_mode;
u32 msi_disable_cntr;
u32 msi_enable_cntr;
};
enum MHI_CMD_STATUS {
@@ -344,12 +353,6 @@ struct mhi_client_handle {
int event_ring_index;
};
enum MHI_EVENT_POLLING {
MHI_EVENT_POLLING_DISABLED = 0x0,
MHI_EVENT_POLLING_ENABLED = 0x1,
MHI_EVENT_POLLING_reserved = 0x80000000
};
enum MHI_TYPE_EVENT_RING {
MHI_ER_DATA_TYPE = 0x1,
MHI_ER_CTRL_TYPE = 0x2,
@@ -386,8 +389,6 @@ struct mhi_counters {
u32 m3_event_timeouts;
u32 m0_event_timeouts;
u32 m2_event_timeouts;
u32 msi_disable_cntr;
u32 msi_enable_cntr;
u32 nr_irq_migrations;
u32 *msi_counter;
u32 *ev_counter;
@@ -414,7 +415,6 @@ struct mhi_flags {
u32 ev_thread_stopped;
u32 st_thread_stopped;
u32 uldl_enabled;
u32 db_mode[MHI_MAX_CHANNELS];
};
struct mhi_wait_queues {
@@ -577,7 +577,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
enum MHI_CHAN_DIR chan_type,
u32 event_ring,
struct mhi_ring *ring,
enum MHI_CHAN_STATE chan_state);
enum MHI_CHAN_STATE chan_state,
bool preserve_db_state);
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan);
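Net effect of these header changes: doorbell-mode tracking and the MSI mask/unmask counters move from global state (flags.db_mode[], struct mhi_counters) into each struct mhi_ring, and mhi_init_chan_ctxt() gains a preserve_db_state flag. The state-transition code later in this commit consumes the new bitfield roughly like this (condensed sketch of the per-channel resume logic):

/* Sketch only: per-ring DB state replaces flags.db_mode[chan]. */
struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];

if (!ring->db_mode.preserve_db_state && reset_db_mode)
	ring->db_mode.db_mode = 1;		/* back to DB mode */
if (ring->wp != ring->rp)			/* work pending? */
	conditional_chan_db_write(mhi_dev_ctxt, chan);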

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,14 +23,14 @@
#define MHICFG (0x10)
#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
#define MHICFG_NER_MASK 0xff0000
#define MHICFG_NER_SHIFT 0x10
#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
#define MHICFG_NCH_MASK 0xff
#define MHICFG_NCH_SHIFT 0x0
#define MHICFG_NHWER_MASK (0xff000000)
#define MHICFG_NHWER_SHIFT (24)
#define MHICFG_NER_MASK (0xff0000)
#define MHICFG_NER_SHIFT (16)
#define MHICFG_NHWCH_MASK (0xff00)
#define MHICFG_NHWCH_SHIFT (8)
#define MHICFG_NCH_MASK (0xff)
#define MHICFG_NCH_SHIFT (0)
#define CHDBOFF (0x18)
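The rewritten register map adds NHWER (number of hardware event rings, bits 31:24) and NHWCH (number of hardware channels, bits 15:8) where reserved fields used to sit. A sketch of decoding a raw MHICFG read with these masks (mmio_base is an assumed ioremapped pointer):

/* Sketch only: unpack MHICFG using the field layout above. */
u32 cfg   = readl_relaxed(mmio_base + MHICFG);
u32 nhwer = (cfg & MHICFG_NHWER_MASK) >> MHICFG_NHWER_SHIFT; /* hw event rings */
u32 ner   = (cfg & MHICFG_NER_MASK)   >> MHICFG_NER_SHIFT;   /* event rings    */
u32 nhwch = (cfg & MHICFG_NHWCH_MASK) >> MHICFG_NHWCH_SHIFT; /* hw channels    */
u32 nch   = (cfg & MHICFG_NCH_MASK)   >> MHICFG_NCH_SHIFT;   /* channels       */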

View file

@@ -585,21 +585,23 @@ error_during_props:
/**
* @brief Initialize the channel context and shadow context
*
* @cc_list: Context to initialize
* @trb_list_phy: Physical base address for the TRE ring
* @trb_list_virt: Virtual base address for the TRE ring
* @el_per_ring: Number of TREs this ring will contain
* @chan_type: Type of channel IN/OUT
* @event_ring: Event ring to be mapped to this channel context
* @ring: Shadow context to be initialized alongside
*
* @cc_list: Context to initialize
* @trb_list_phy: Physical base address for the TRE ring
* @trb_list_virt: Virtual base address for the TRE ring
* @el_per_ring: Number of TREs this ring will contain
* @chan_type: Type of channel IN/OUT
* @event_ring: Event ring to be mapped to this channel context
* @ring: Shadow context to be initialized alongside
* @chan_state: Channel state
* @preserve_db_state: Do not reset DB state during resume
* @Return errno
*/
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
u32 event_ring, struct mhi_ring *ring,
enum MHI_CHAN_STATE chan_state)
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
u32 event_ring, struct mhi_ring *ring,
enum MHI_CHAN_STATE chan_state,
bool preserve_db_state)
{
cc_list->mhi_chan_state = chan_state;
cc_list->mhi_chan_type = chan_type;
@@ -617,6 +619,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
ring->el_size = sizeof(struct mhi_tx_pkt);
ring->overwrite_en = 0;
ring->dir = chan_type;
ring->db_mode.db_mode = 1;
ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
/* Flush writes to MMIO */
wmb();
return 0;

View file

@@ -277,20 +277,22 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
client_handle->msi_vec));
client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
(client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
struct mhi_device_ctxt *mhi_dev_ctxt =
client_handle->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
mhi_local_event_ctxt[client_handle->event_ring_index];
disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
client_handle->msi_vec));
if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
struct mhi_device_ctxt *mhi_dev_ctxt =
client_handle->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
mhi_local_event_ctxt[client_handle->event_ring_index];
ev_ring->msi_enable_cntr++;
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
}
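With the counters now kept per event ring, the removed "no nested IRQ" log checks could be reconstructed from a debug sweep if needed; one possible consumer of the new fields (sketch only, not part of this commit):

/* Sketch only: spot unbalanced mask/unmask per event ring. */
int i;

for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
	struct mhi_ring *ev = &mhi_dev_ctxt->mhi_local_event_ctxt[i];

	if (ev->msi_disable_cntr > ev->msi_enable_cntr + 1)
		mhi_log(MHI_MSG_INFO, "ev ring %d nested IRQ disable\n", i);
}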

View file

@@ -96,7 +96,6 @@
((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
/* MHI Transfer Ring Elements 7.4.1*/
#define TX_TRB_LEN
#define MHI_TX_TRB_LEN__SHIFT (0)
@@ -244,6 +243,10 @@
#define MHI_CHAN_TYPE__MASK (3)
#define MHI_CHAN_TYPE__SHIFT (6)
#define PRESERVE_DB_STATE
#define MHI_PRESERVE_DB_STATE__MASK (1)
#define MHI_PRESERVE_DB_STATE__SHIFT (8)
#define GET_CHAN_PROPS(_FIELD, _VAL) \
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
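GET_CHAN_PROPS() pastes the field name onto the __SHIFT/__MASK macros, so the new PRESERVE_DB_STATE property is pulled out of the channel flags like this (expansion shown for illustration):

/* GET_CHAN_PROPS(PRESERVE_DB_STATE, flags) expands to: */
((flags >> MHI_PRESERVE_DB_STATE__SHIFT) & MHI_PRESERVE_DB_STATE__MASK)
/* i.e. bit 8 of the channel flags selects preserve-DB-state */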

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -212,7 +212,7 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ring->len, ring->base,
cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
MHI_CHAN_STATE_DISABLED);
MHI_CHAN_STATE_DISABLED, false);
return 0;
}
@@ -259,7 +259,9 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
client_handle->chan_info.flags),
client_handle->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
MHI_CHAN_STATE_ENABLED);
MHI_CHAN_STATE_ENABLED,
GET_CHAN_PROPS(PRESERVE_DB_STATE,
client_handle->chan_info.flags));
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -520,9 +522,9 @@ static inline int mhi_queue_tre(struct mhi_device_ctxt
}
} else {
mhi_log(MHI_MSG_VERBOSE,
"Wakeup, pending data state %d chan state %d\n",
mhi_dev_ctxt->mhi_state,
chan_ctxt->mhi_chan_state);
"Wakeup, pending data state %s chan state %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
chan_ctxt->mhi_chan_state);
ret_val = 0;
}
return ret_val;
@@ -654,6 +656,7 @@ static int mhi_queue_dma_xfer(
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
chan = client_handle->chan_info.chan_nr;
mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
@@ -698,6 +701,7 @@ static int mhi_queue_dma_xfer(
error:
pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return ret_val;
@@ -766,10 +770,9 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
}
mhi_log(MHI_MSG_INFO,
"Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
mhi_dev_ctxt->mhi_state,
mhi_dev_ctxt->dev_exec_env,
chan, cmd);
"Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->dev_exec_env, chan, cmd);
mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
/*
@@ -870,7 +873,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->buf_addr = bb->client_buf;
result->bytes_xferd = bb->filled_size;
result->transaction_status = 0;
/* At this point the bounce buffer is no longer necessary
* Whatever was received from the device was copied back to the
@@ -1166,13 +1168,17 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
{
struct mhi_ring *chan_ctxt = NULL;
u64 db_value = 0;
unsigned long flags;
mhi_dev_ctxt->flags.uldl_enabled = 1;
chan = MHI_EV_READ_CHID(EV_CHID, event);
mhi_dev_ctxt->flags.db_mode[chan] = 1;
chan_ctxt =
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
flags);
chan_ctxt->db_mode.db_mode = 1;
if (chan_ctxt->wp != chan_ctxt->rp) {
db_value = mhi_v2p_addr(mhi_dev_ctxt,
MHI_RING_TYPE_XFER_RING, chan,
@@ -1182,8 +1188,10 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
db_value);
}
client_handle = mhi_dev_ctxt->client_handle_list[chan];
if (NULL != client_handle)
result->transaction_status = -ENOTCONN;
if (client_handle)
result->transaction_status = -ENOTCONN;
spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan],
flags);
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1393,8 +1401,10 @@ static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan;
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
if (!VALID_CHAN_NR(chan))
if (!VALID_CHAN_NR(chan)) {
mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
return -EINVAL;
}
mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
MHI_CMD_NOT_PENDING;
mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
@@ -1652,6 +1662,8 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan, u32 val)
{
struct mhi_ring *chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mhi_log(MHI_MSG_VERBOSE,
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
io_addr, chan, val);
@@ -1662,14 +1674,14 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
if (!(IS_HARDWARE_CHANNEL(chan) &&
mhi_dev_ctxt->flags.uldl_enabled &&
!mhi_dev_ctxt->flags.db_mode[chan])) {
!chan_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
mhi_dev_ctxt->flags.db_mode[chan] = 0;
chan_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(MHI_MSG_INFO,
"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
chan, mhi_dev_ctxt->flags.uldl_enabled,
mhi_dev_ctxt->flags.db_mode[chan]);
"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
chan, mhi_dev_ctxt->flags.uldl_enabled,
chan_ctxt->db_mode.db_mode);
}
/* Event Doorbell and Polling mode Disabled */
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
@@ -1677,11 +1689,9 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
!mhi_dev_ctxt->flags.uldl_enabled) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
} else {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
}
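Condensing the rewritten gating in mhi_process_db(): a channel doorbell write is suppressed only when the channel is a hardware channel, UL/DL acceleration is enabled, and the ring is not in DB mode; the event and command doorbell paths no longer touch db_mode at all. Equivalent logic as a sketch:

/* Sketch only: when does the channel doorbell actually ring? */
bool ring_db = !(IS_HARDWARE_CHANNEL(chan) &&
		 mhi_dev_ctxt->flags.uldl_enabled &&
		 !chan_ctxt->db_mode.db_mode);

if (ring_db) {
	mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
	chan_ctxt->db_mode.db_mode = 0;	/* back to burst mode */
}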

View file

@@ -144,6 +144,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG,
MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings);
mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
MHICFG,
MHICFG_NHWER_MASK,
MHICFG_NHWER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);

View file

@@ -46,8 +46,8 @@ int mhi_pci_suspend(struct device *dev)
if (NULL == mhi_dev_ctxt)
return -EINVAL;
mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
mhi_dev_ctxt->mhi_state);
mhi_log(MHI_MSG_INFO, "Entered, MHI state %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
r = mhi_initiate_m3(mhi_dev_ctxt);
@@ -115,7 +115,8 @@ int mhi_pci_resume(struct device *dev)
break;
default:
mhi_log(MHI_MSG_INFO,
"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
"Wait complete state: %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
r = 0;
}
exit:
@@ -181,8 +182,8 @@ ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
}
mhi_initiate_m0(mhi_dev_ctxt);
mhi_log(MHI_MSG_CRITICAL,
"Current mhi_state = 0x%x\n",
mhi_dev_ctxt->mhi_state);
"Current mhi_state = %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return count;
}

View file

@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
*assigned_addr = (char *)ring->wp;
*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
/* force update visible to other cores */
smp_wmb();
return 0;
}
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
/* force update visible to other cores */
smp_wmb();
return 0;
}
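The smp_wmb() calls added above publish the updated ring pointer before other cores poll it; a consumer on another CPU would typically pair them with a read barrier, roughly like this (helper name hypothetical):

/* Sketch only: reader side pairing with the producer's smp_wmb(). */
void *wp = READ_ONCE(ring->wp);	/* observe the updated write pointer */
smp_rmb();			/* ...before reading behind it */
while (ring->rp != wp)
	consume_element(ring);	/* hypothetical; advances ring->rp */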
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
struct mhi_device_ctxt *ctxt;
int bb_ring, ch_ring;
if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
!client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
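mhi_get_free_desc() now reports the smaller of the bounce-buffer ring and the transfer ring, since queueing one descriptor consumes a slot in each. Client-side use would look roughly like this (both helpers hypothetical):

/* Sketch only: never queue more than both rings can absorb. */
int free_desc = mhi_get_free_desc(client_handle);

while (free_desc-- > 0 && have_pending_buffers(client))
	queue_one_buffer(client_handle);	/* hypothetical helpers */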

View file

@@ -17,6 +17,29 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
static const char *state_transition_str(enum STATE_TRANSITION state)
{
static const char * const mhi_states_transition_str[] = {
"RESET",
"READY",
"M0",
"M1",
"M2",
"M3",
"BHI",
"SBL",
"AMSS",
"LINK_DOWN",
"WAKE"
};
if (state == STATE_TRANSITION_SYS_ERR)
return "SYS_ERR";
return (state <= STATE_TRANSITION_WAKE) ?
mhi_states_transition_str[state] : "Invalid";
}
static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state)
{
@@ -57,7 +80,8 @@ static void conditional_chan_db_write(
spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
}
static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
bool reset_db_mode)
{
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
@@ -66,11 +90,13 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
if (IS_HARDWARE_CHANNEL(i))
mhi_dev_ctxt->flags.db_mode[i] = 1;
if ((local_ctxt->wp != local_ctxt->rp) ||
((local_ctxt->wp != local_ctxt->rp) &&
(local_ctxt->dir == MHI_IN)))
/* Reset the DB Mode state to DB Mode */
if (local_ctxt->db_mode.preserve_db_state == 0
&& reset_db_mode)
local_ctxt->db_mode.db_mode = 1;
if (local_ctxt->wp != local_ctxt->rp)
conditional_chan_db_write(mhi_dev_ctxt, i);
}
}
@@ -150,8 +176,9 @@ static int process_m0_transition(
"Transitioning from M1.\n");
} else {
mhi_log(MHI_MSG_INFO,
"MHI State %d link state %d. Quitting\n",
mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
"MHI State %s link state %d. Quitting\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->flags.link_up);
}
read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@@ -162,7 +189,7 @@
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
ring_all_chan_dbs(mhi_dev_ctxt);
ring_all_chan_dbs(mhi_dev_ctxt, true);
ring_all_cmd_dbs(mhi_dev_ctxt);
}
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
@@ -196,8 +223,8 @@ static int process_m1_transition(
int r = 0;
mhi_log(MHI_MSG_INFO,
"Processing M1 state transition from state %d\n",
mhi_dev_ctxt->mhi_state);
"Processing M1 state transition from state %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if (!mhi_dev_ctxt->flags.pending_M3) {
@@ -444,8 +471,8 @@ static int process_reset_transition(
STATE_TRANSITION_RESET);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
"Failed to initiate 0x%x state trans\n",
STATE_TRANSITION_RESET);
"Failed to initiate %s state trans\n",
state_transition_str(STATE_TRANSITION_RESET));
break;
default:
mhi_log(MHI_MSG_CRITICAL,
@@ -475,8 +502,8 @@ static int process_reset_transition(
STATE_TRANSITION_READY);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
"Failed to initiate 0x%x state trans\n",
STATE_TRANSITION_READY);
"Failed to initiate %s state trans\n",
state_transition_str(STATE_TRANSITION_READY));
return r;
}
@@ -594,7 +621,7 @@ static int process_amss_transition(
"Failed to set local chan state ret %d\n", r);
return r;
}
ring_all_chan_dbs(mhi_dev_ctxt);
ring_all_chan_dbs(mhi_dev_ctxt, true);
mhi_log(MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
@@ -608,7 +635,7 @@ static int process_amss_transition(
i, r);
return r;
}
ring_all_chan_dbs(mhi_dev_ctxt);
ring_all_chan_dbs(mhi_dev_ctxt, true);
}
ring_all_ev_dbs(mhi_dev_ctxt);
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
@@ -636,8 +663,8 @@ int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
STATE_TRANSITION_RESET);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
"Failed to initiate 0x%x state trans ret %d\n",
STATE_TRANSITION_RESET, r);
"Failed to initiate %s state trans ret %d\n",
state_transition_str(STATE_TRANSITION_RESET), r);
mhi_log(MHI_MSG_INFO, "Exiting\n");
return r;
}
@@ -648,8 +675,8 @@ static int process_stt_work_item(
{
int r = 0;
mhi_log(MHI_MSG_INFO, "Transitioning to %d\n",
(int)cur_work_item);
mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
@@ -689,7 +716,8 @@ static int process_stt_work_item(
break;
default:
mhi_log(MHI_MSG_ERROR,
"Unrecongized state: %d\n", cur_work_item);
"Unrecongized state: %s\n",
state_transition_str(cur_work_item));
break;
}
return r;
@@ -762,8 +790,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
BUG_ON(nr_avail_work_items <= 0);
mhi_log(MHI_MSG_VERBOSE,
"Processing state transition %x\n",
new_state);
"Processing state transition %s\n",
state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
@@ -778,13 +806,14 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
unsigned long flags;
mhi_log(MHI_MSG_INFO,
"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
mhi_dev_ctxt->flags.pending_M3);
"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->flags.pending_M0,
mhi_dev_ctxt->flags.pending_M3);
mutex_lock(&mhi_dev_ctxt->pm_lock);
mhi_log(MHI_MSG_INFO,
"Waiting for M0 M1 or M3. Currently %d...\n",
mhi_dev_ctxt->mhi_state);
"Waiting for M0 M1 or M3. Currently %s...\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
@@ -794,9 +823,9 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
switch (r) {
case 0:
mhi_log(MHI_MSG_CRITICAL,
"Timeout: State %d after %d ms\n",
mhi_dev_ctxt->mhi_state,
MHI_MAX_SUSPEND_TIMEOUT);
"Timeout: State %s after %d ms\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
MHI_MAX_SUSPEND_TIMEOUT);
mhi_dev_ctxt->counters.m0_event_timeouts++;
r = -ETIME;
goto exit;
@@ -806,7 +835,8 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
goto exit;
default:
mhi_log(MHI_MSG_INFO,
"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
"Wait complete state: %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
r = 0;
break;
}
@@ -814,8 +844,8 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
mhi_assert_device_wake(mhi_dev_ctxt);
mhi_log(MHI_MSG_INFO,
"MHI state %d, done\n",
mhi_dev_ctxt->mhi_state);
"MHI state %s, done\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
goto exit;
} else {
if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
@@ -864,9 +894,10 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
int r = 0, abort_m3 = 0;
mhi_log(MHI_MSG_INFO,
"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
mhi_dev_ctxt->flags.pending_M3);
"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->flags.pending_M0,
mhi_dev_ctxt->flags.pending_M3);
mutex_lock(&mhi_dev_ctxt->pm_lock);
switch (mhi_dev_ctxt->mhi_state) {
case MHI_STATE_RESET:
@@ -881,47 +912,53 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
case MHI_STATE_M0:
case MHI_STATE_M1:
case MHI_STATE_M2:
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_log(MHI_MSG_INFO,
"Triggering wake out of M2\n");
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_dev_ctxt->flags.pending_M3 = 1;
if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
mhi_log(MHI_MSG_INFO,
"M2 transition not set\n");
mhi_assert_device_wake(mhi_dev_ctxt);
}
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
r = wait_event_interruptible_timeout(
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
flags);
r = wait_event_interruptible_timeout(
*mhi_dev_ctxt->mhi_ev_wq.m0_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
mhi_dev_ctxt->mhi_state == MHI_STATE_M0,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (0 == r || -ERESTARTSYS == r) {
mhi_log(MHI_MSG_CRITICAL,
"MDM failed to come out of M2.\n");
mhi_dev_ctxt->counters.m2_event_timeouts++;
r = -EAGAIN;
goto exit;
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if (0 == r || -ERESTARTSYS == r) {
mhi_log(MHI_MSG_CRITICAL,
"MDM failed to come out of M2.\n");
mhi_dev_ctxt->counters.m2_event_timeouts++;
r = -EAGAIN;
goto unlock;
}
}
break;
case MHI_STATE_M3:
mhi_log(MHI_MSG_INFO,
"MHI state %d, link state %d.\n",
mhi_dev_ctxt->mhi_state,
mhi_dev_ctxt->flags.link_up);
"MHI state %s, link state %d.\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->flags.link_up);
if (mhi_dev_ctxt->flags.link_up)
r = -EAGAIN;
else
r = 0;
goto exit;
default:
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
mhi_log(MHI_MSG_INFO,
"MHI state %d, link state %d.\n",
mhi_dev_ctxt->mhi_state,
mhi_dev_ctxt->flags.link_up);
"MHI state %s, link state %d.\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->flags.link_up);
break;
}
while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
mhi_log(MHI_MSG_INFO,
"There are still %d acks pending from device\n",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
@@ -929,25 +966,23 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
__pm_relax(&mhi_dev_ctxt->w_lock);
abort_m3 = 1;
r = -EAGAIN;
goto exit;
goto unlock;
}
if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
abort_m3 = 1;
r = -EAGAIN;
goto exit;
goto unlock;
}
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
if (mhi_dev_ctxt->flags.pending_M0) {
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
r = -EAGAIN;
goto exit;
goto unlock;
}
mhi_dev_ctxt->flags.pending_M3 = 1;
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
mhi_log(MHI_MSG_INFO,
"Waiting for M3 completion.\n");
r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
@@ -970,16 +1005,20 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
r = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (r)
mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
exit:
goto exit;
unlock:
mhi_dev_ctxt->flags.pending_M3 = 0;
if (abort_m3) {
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
atomic_inc(&mhi_dev_ctxt->flags.data_pending);
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
ring_all_chan_dbs(mhi_dev_ctxt);
ring_all_chan_dbs(mhi_dev_ctxt, false);
ring_all_cmd_dbs(mhi_dev_ctxt);
atomic_dec(&mhi_dev_ctxt->flags.data_pending);
mhi_deassert_device_wake(mhi_dev_ctxt);
} else {
write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
}
exit:
mhi_dev_ctxt->flags.pending_M3 = 0;
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;

View file

@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
const char * const mhi_states_str[MHI_STATE_LIMIT] = {
"RESET",
"READY",
"M0",
"M1",
"M2",
"M3",
"Reserved: 0x06",
"BHI",
"SYS_ERR",
};
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
@@ -225,9 +237,9 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
MHI_LOG_SIZE,
"%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
"%s %s %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d\n",
"Our State:",
mhi_dev_ctxt->mhi_state,
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
"M0->M1:",
mhi_dev_ctxt->counters.m0_m1,
"M0<-M1:",
@@ -244,10 +256,6 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->counters.m3_event_timeouts,
"M0_ev_TO:",
mhi_dev_ctxt->counters.m0_event_timeouts,
"MSI_d:",
mhi_dev_ctxt->counters.msi_disable_cntr,
"MSI_e:",
mhi_dev_ctxt->counters.msi_enable_cntr,
"outstanding_acks:",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
"LPM:",

View file

@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
"[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
extern const char * const mhi_states_str[MHI_STATE_LIMIT];
#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
"INVALID_STATE" : mhi_states_str[state])
irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);
struct mhi_meminfo {

View file

@@ -63,9 +63,10 @@ enum MHI_CLIENT_CHANNEL {
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_IP_HW_ADPL_IN = 102,
MHI_CLIENT_RESERVED_2_LOWER = 103,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102
MHI_MAX_CHANNELS = 103
};
enum MHI_CB_REASON {