Merge "mhi: core: Add support for new MHI hardware channel"

commit e042032335
Linux Build Service Account, 2017-02-23 21:35:29 -08:00, committed by Gerrit - the friendly Code Review server
14 changed files with 236 additions and 146 deletions

View file

@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
 			tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
 			netif_stop_queue(dev);
 			rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
+			write_unlock_irqrestore(
+				&rmnet_mhi_ptr->out_chan_full_lock,
+				flags);
 			goto rmnet_mhi_xmit_error_cleanup;
 		} else {
 			retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
 rmnet_mhi_xmit_error_cleanup:
 	rmnet_log(MSG_VERBOSE, "Ring full\n");
-	write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
 	return NETDEV_TX_BUSY;
 }
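
Note: the substance of this hunk is lock balance — every exit from the flow-control critical section must release out_chan_full_lock exactly once, so the unlock moves from the shared cleanup label into the queue-stop branch. A minimal userspace sketch of the same pattern (pthreads standing in for the kernel spinlock; all names illustrative, not from the driver):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t chan_full_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool ring_full;

    /* Returns -1 (the NETDEV_TX_BUSY analogue) with the lock already
     * released, mirroring the fix: unlock before jumping to cleanup. */
    static int xmit(void)
    {
            pthread_mutex_lock(&chan_full_lock);
            if (ring_full) {
                    /* stop the queue, then release before bailing out */
                    pthread_mutex_unlock(&chan_full_lock);
                    goto error_cleanup;
            }
            pthread_mutex_unlock(&chan_full_lock);
            return 0;

    error_cleanup:
            return -1;
    }

    int main(void)
    {
            ring_full = true;
            return xmit() == -1 ? 0 : 1;
    }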

View file

@@ -261,6 +261,12 @@ enum MHI_EVENT_CCS {
 	MHI_EVENT_CC_BAD_TRE = 0x11,
 };

+struct db_mode {
+	/* if set do not reset DB_Mode during M0 resume */
+	u32 preserve_db_state : 1;
+	u32 db_mode : 1;
+};
+
 struct mhi_ring {
 	void *base;
 	void *wp;
@@ -270,6 +276,9 @@ struct mhi_ring {
 	uintptr_t el_size;
 	u32 overwrite_en;
 	enum MHI_CHAN_DIR dir;
+	struct db_mode db_mode;
+	u32 msi_disable_cntr;
+	u32 msi_enable_cntr;
 };

 enum MHI_CMD_STATUS {
@@ -344,12 +353,6 @@ struct mhi_client_handle {
 	int event_ring_index;
 };

-enum MHI_EVENT_POLLING {
-	MHI_EVENT_POLLING_DISABLED = 0x0,
-	MHI_EVENT_POLLING_ENABLED = 0x1,
-	MHI_EVENT_POLLING_reserved = 0x80000000
-};
-
 enum MHI_TYPE_EVENT_RING {
 	MHI_ER_DATA_TYPE = 0x1,
 	MHI_ER_CTRL_TYPE = 0x2,
@@ -386,8 +389,6 @@ struct mhi_counters {
 	u32 m3_event_timeouts;
 	u32 m0_event_timeouts;
 	u32 m2_event_timeouts;
-	u32 msi_disable_cntr;
-	u32 msi_enable_cntr;
 	u32 nr_irq_migrations;
 	u32 *msi_counter;
 	u32 *ev_counter;
@@ -414,7 +415,6 @@ struct mhi_flags {
 	u32 ev_thread_stopped;
 	u32 st_thread_stopped;
 	u32 uldl_enabled;
-	u32 db_mode[MHI_MAX_CHANNELS];
 };

 struct mhi_wait_queues {
@@ -577,7 +577,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       enum MHI_CHAN_DIR chan_type,
 		       u32 event_ring,
 		       struct mhi_ring *ring,
-		       enum MHI_CHAN_STATE chan_state);
+		       enum MHI_CHAN_STATE chan_state,
+		       bool preserve_db_state);
 int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
 				   u32 chan);
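
Note: per-channel doorbell-mode state moves out of the device-wide flags.db_mode[] array and into each ring as a two-bit struct, together with the per-event-ring MSI mask/unmask counters. A standalone sketch of the bitfield, outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the new two-bit state: both flags pack into one word. */
    struct db_mode {
            uint32_t preserve_db_state : 1; /* skip reset on M0 resume */
            uint32_t db_mode : 1;           /* ring doorbell on next write */
    };

    int main(void)
    {
            struct db_mode m = { .preserve_db_state = 1, .db_mode = 0 };

            /* A resume path would only force db_mode back on when
             * preserve_db_state is unset. */
            if (!m.preserve_db_state)
                    m.db_mode = 1;
            printf("preserve=%u db_mode=%u\n",
                   m.preserve_db_state, m.db_mode);
            return 0;
    }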

View file

@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,14 +23,14 @@
 #define MHICFG (0x10)

-#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
-#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
-#define MHICFG_NER_MASK 0xff0000
-#define MHICFG_NER_SHIFT 0x10
-#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
-#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
-#define MHICFG_NCH_MASK 0xff
-#define MHICFG_NCH_SHIFT 0x0
+#define MHICFG_NHWER_MASK (0xff000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xff0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xff00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xff)
+#define MHICFG_NCH_SHIFT (0)

 #define CHDBOFF (0x18)
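
Note: the renaming documents that the two formerly reserved byte fields of MHICFG carry hardware counts: NHWER (hardware event rings) in bits 31:24, NER in 23:16, NHWCH (hardware channels) in 15:8, NCH in 7:0. A standalone sketch of the decode (the register value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define MHICFG_NHWER_MASK (0xff000000)
    #define MHICFG_NHWER_SHIFT (24)
    #define MHICFG_NER_MASK (0xff0000)
    #define MHICFG_NER_SHIFT (16)
    #define MHICFG_NHWCH_MASK (0xff00)
    #define MHICFG_NHWCH_SHIFT (8)
    #define MHICFG_NCH_MASK (0xff)
    #define MHICFG_NCH_SHIFT (0)

    int main(void)
    {
            uint32_t mhicfg = 0x02050267; /* illustrative register value */

            printf("hw event rings: %u\n",
                   (mhicfg & MHICFG_NHWER_MASK) >> MHICFG_NHWER_SHIFT);
            printf("event rings:    %u\n",
                   (mhicfg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT);
            printf("hw channels:    %u\n",
                   (mhicfg & MHICFG_NHWCH_MASK) >> MHICFG_NHWCH_SHIFT);
            printf("channels:       %u\n",
                   (mhicfg & MHICFG_NCH_MASK) >> MHICFG_NCH_SHIFT);
            return 0;
    }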

View file

@@ -585,21 +585,23 @@ error_during_props:
 /**
  * @brief Initialize the channel context and shadow context
  *
  * @cc_list: Context to initialize
  * @trb_list_phy: Physical base address for the TRE ring
  * @trb_list_virt: Virtual base address for the TRE ring
  * @el_per_ring: Number of TREs this ring will contain
  * @chan_type: Type of channel IN/OUT
  * @event_ring: Event ring to be mapped to this channel context
  * @ring: Shadow context to be initialized alongside
- *
+ * @chan_state: Channel state
+ * @preserve_db_state: Do not reset DB state during resume
  * @Return errno
  */
 int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       uintptr_t trb_list_phy, uintptr_t trb_list_virt,
 		       u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
 		       u32 event_ring, struct mhi_ring *ring,
-		       enum MHI_CHAN_STATE chan_state)
+		       enum MHI_CHAN_STATE chan_state,
+		       bool preserve_db_state)
 {
 	cc_list->mhi_chan_state = chan_state;
 	cc_list->mhi_chan_type = chan_type;
@@ -617,6 +619,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 	ring->el_size = sizeof(struct mhi_tx_pkt);
 	ring->overwrite_en = 0;
 	ring->dir = chan_type;
+	ring->db_mode.db_mode = 1;
+	ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
 	/* Flush writes to MMIO */
 	wmb();
 	return 0;

View file

@@ -277,20 +277,22 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)

 void mhi_mask_irq(struct mhi_client_handle *client_handle)
 {
-	disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
-				      client_handle->msi_vec));
-	client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
-	if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
-	    (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
-		mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
+	struct mhi_device_ctxt *mhi_dev_ctxt =
+		client_handle->mhi_dev_ctxt;
+	struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+		mhi_local_event_ctxt[client_handle->event_ring_index];
+
+	disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+	ev_ring->msi_disable_cntr++;
 }

 void mhi_unmask_irq(struct mhi_client_handle *client_handle)
 {
-	client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
-	enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
-			      client_handle->msi_vec));
-	if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
-	    client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
-		mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
+	struct mhi_device_ctxt *mhi_dev_ctxt =
+		client_handle->mhi_dev_ctxt;
+	struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+		mhi_local_event_ctxt[client_handle->event_ring_index];
+
+	ev_ring->msi_enable_cntr++;
+	enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
 }

View file

@ -96,7 +96,6 @@
((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \ ((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings))) ((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
/* MHI Transfer Ring Elements 7.4.1*/ /* MHI Transfer Ring Elements 7.4.1*/
#define TX_TRB_LEN #define TX_TRB_LEN
#define MHI_TX_TRB_LEN__SHIFT (0) #define MHI_TX_TRB_LEN__SHIFT (0)
@ -244,6 +243,10 @@
#define MHI_CHAN_TYPE__MASK (3) #define MHI_CHAN_TYPE__MASK (3)
#define MHI_CHAN_TYPE__SHIFT (6) #define MHI_CHAN_TYPE__SHIFT (6)
#define PRESERVE_DB_STATE
#define MHI_PRESERVE_DB_STATE__MASK (1)
#define MHI_PRESERVE_DB_STATE__SHIFT (8)
#define GET_CHAN_PROPS(_FIELD, _VAL) \ #define GET_CHAN_PROPS(_FIELD, _VAL) \
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK) (((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
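
Note: PRESERVE_DB_STATE becomes a single-bit channel property at bit 8 of the channel flags, pulled out by the existing GET_CHAN_PROPS token-pasting macro. A standalone sketch (the flags value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define MHI_CHAN_TYPE__MASK (3)
    #define MHI_CHAN_TYPE__SHIFT (6)
    #define MHI_PRESERVE_DB_STATE__MASK (1)
    #define MHI_PRESERVE_DB_STATE__SHIFT (8)

    /* Token-pasting extractor, same shape as the driver's macro. */
    #define GET_CHAN_PROPS(_FIELD, _VAL) \
            (((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)

    int main(void)
    {
            uint32_t flags = (1u << 8) | (2u << 6); /* made-up flags word */

            printf("chan type:   %u\n", GET_CHAN_PROPS(CHAN_TYPE, flags));
            printf("preserve db: %u\n",
                   GET_CHAN_PROPS(PRESERVE_DB_STATE, flags));
            return 0;
    }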

View file

@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -212,7 +212,7 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
 			  ring->len, ring->base,
 			  cc_list->mhi_trb_ring_base_addr);
 	mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
-			   MHI_CHAN_STATE_DISABLED);
+			   MHI_CHAN_STATE_DISABLED, false);
 	return 0;
 }
@@ -259,7 +259,9 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
 			   client_handle->chan_info.flags),
 			   client_handle->chan_info.ev_ring,
 			   &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
-			   MHI_CHAN_STATE_ENABLED);
+			   MHI_CHAN_STATE_ENABLED,
+			   GET_CHAN_PROPS(PRESERVE_DB_STATE,
+					  client_handle->chan_info.flags));
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
 }
@@ -520,9 +522,9 @@ static inline int mhi_queue_tre(struct mhi_device_ctxt
 		}
 	} else {
 		mhi_log(MHI_MSG_VERBOSE,
-			"Wakeup, pending data state %d chan state %d\n",
-			mhi_dev_ctxt->mhi_state,
+			"Wakeup, pending data state %s chan state %d\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
 			chan_ctxt->mhi_chan_state);
 		ret_val = 0;
 	}
 	return ret_val;
@@ -654,6 +656,7 @@ static int mhi_queue_dma_xfer(
 	MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
 		   "Client buffer is of invalid length\n");
 	chan = client_handle->chan_info.chan_nr;
+
 	mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
 	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
@@ -698,6 +701,7 @@ static int mhi_queue_dma_xfer(
 error:
 	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+
 	mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
 	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	return ret_val;
@@ -766,10 +770,9 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	}
 	mhi_log(MHI_MSG_INFO,
-		"Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
-		mhi_dev_ctxt->mhi_state,
-		mhi_dev_ctxt->dev_exec_env,
-		chan, cmd);
+		"Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+		mhi_dev_ctxt->dev_exec_env, chan, cmd);
 	mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
 	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	/*
@@ -870,7 +873,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
 	result->buf_addr = bb->client_buf;
 	result->bytes_xferd = bb->filled_size;
-	result->transaction_status = 0;

 	/* At this point the bounce buffer is no longer necessary
 	 * Whatever was received from the device was copied back to the
@@ -1166,13 +1168,17 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
 	{
 		struct mhi_ring *chan_ctxt = NULL;
 		u64 db_value = 0;
+		unsigned long flags;
+
 		mhi_dev_ctxt->flags.uldl_enabled = 1;
 		chan = MHI_EV_READ_CHID(EV_CHID, event);
-		mhi_dev_ctxt->flags.db_mode[chan] = 1;
 		chan_ctxt =
 			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 		mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
+		spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
+				  flags);
+		chan_ctxt->db_mode.db_mode = 1;
 		if (chan_ctxt->wp != chan_ctxt->rp) {
 			db_value = mhi_v2p_addr(mhi_dev_ctxt,
 					MHI_RING_TYPE_XFER_RING, chan,
@@ -1182,8 +1188,10 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
 				     db_value);
 		}
 		client_handle = mhi_dev_ctxt->client_handle_list[chan];
-		if (NULL != client_handle)
+		if (client_handle)
 			result->transaction_status = -ENOTCONN;
+		spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan],
+				       flags);
 		break;
 	}
 	case MHI_EVENT_CC_BAD_TRE:
@@ -1393,8 +1401,10 @@ static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	u32 chan;
 	MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-	if (!VALID_CHAN_NR(chan))
+	if (!VALID_CHAN_NR(chan)) {
 		mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
+		return -EINVAL;
+	}
 	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
 					MHI_CMD_NOT_PENDING;
 	mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
@@ -1652,6 +1662,8 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 		    void __iomem *io_addr,
 		    uintptr_t chan, u32 val)
 {
+	struct mhi_ring *chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
 	mhi_log(MHI_MSG_VERBOSE,
 		"db.set addr: %p io_offset 0x%lx val:0x%x\n",
 		io_addr, chan, val);
@@ -1662,14 +1674,14 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 	if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
 		if (!(IS_HARDWARE_CHANNEL(chan) &&
 		      mhi_dev_ctxt->flags.uldl_enabled &&
-		      !mhi_dev_ctxt->flags.db_mode[chan])) {
+		      !chan_ctxt->db_mode.db_mode)) {
 			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-			mhi_dev_ctxt->flags.db_mode[chan] = 0;
+			chan_ctxt->db_mode.db_mode = 0;
 		} else {
 			mhi_log(MHI_MSG_INFO,
 				"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
 				chan, mhi_dev_ctxt->flags.uldl_enabled,
-				mhi_dev_ctxt->flags.db_mode[chan]);
+				chan_ctxt->db_mode.db_mode);
 		}
 	/* Event Doorbell and Polling mode Disabled */
 	} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
@@ -1677,11 +1689,9 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 		if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
 		    !mhi_dev_ctxt->flags.uldl_enabled) {
 			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-			mhi_dev_ctxt->flags.db_mode[chan] = 0;
 		}
 	} else {
 		mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-		mhi_dev_ctxt->flags.db_mode[chan] = 0;
 	}
 }
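
Note: as read here, mhi_process_db now keys off the per-ring db_mode bit: a channel doorbell is suppressed only for a hardware channel in uldl (out-of-band) mode whose db_mode bit is clear, and db_mode is no longer touched on event or command doorbells. A sketch of that gate, with the driver types reduced to plain booleans:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool db_mode; };

    /* Decide whether to ring a channel doorbell; mirrors the new gate:
     * skip only for HW channels in uldl mode whose db_mode is cleared. */
    static bool should_ring(bool hw_chan, bool uldl_enabled, struct ring *r)
    {
            if (!(hw_chan && uldl_enabled && !r->db_mode)) {
                    r->db_mode = false; /* device polls until next OOB event */
                    return true;
            }
            return false;
    }

    int main(void)
    {
            struct ring r = { .db_mode = true };

            printf("%d\n", should_ring(true, true, &r)); /* 1: rings once */
            printf("%d\n", should_ring(true, true, &r)); /* 0: suppressed */
            return 0;
    }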

View file

@ -144,6 +144,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG, MHICFG,
MHICFG_NER_MASK, MHICFG_NER_SHIFT, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings); mhi_dev_ctxt->mmio_info.nr_event_rings);
mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
MHICFG,
MHICFG_NHWER_MASK,
MHICFG_NHWER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list; pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val); pcie_word_val = HIGH_WORD(pcie_dword_val);
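
Note: MMIO init now also programs the hardware event ring count into the NHWER byte of MHICFG. A sketch of the read-modify-write that a masked field write such as mhi_reg_write_field() performs (the helper's internals are not shown in this diff, so this shape is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Generic masked field update: clear the field, then OR in the
     * shifted value, never disturbing neighbouring fields. */
    static uint32_t write_field(uint32_t reg, uint32_t mask,
                                uint32_t shift, uint32_t val)
    {
            reg &= ~mask;
            reg |= (val << shift) & mask;
            return reg;
    }

    int main(void)
    {
            uint32_t mhicfg = 0;

            mhicfg = write_field(mhicfg, 0xff0000, 16, 5);   /* NER   */
            mhicfg = write_field(mhicfg, 0xff000000, 24, 2); /* NHWER */
            printf("MHICFG = 0x%08x\n", mhicfg); /* 0x02050000 */
            return 0;
    }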

View file

@@ -46,8 +46,8 @@ int mhi_pci_suspend(struct device *dev)
 	if (NULL == mhi_dev_ctxt)
 		return -EINVAL;

-	mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
-		mhi_dev_ctxt->mhi_state);
+	mhi_log(MHI_MSG_INFO, "Entered, MHI state %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
 	r = mhi_initiate_m3(mhi_dev_ctxt);
@@ -115,7 +115,8 @@ int mhi_pci_resume(struct device *dev)
 		break;
 	default:
 		mhi_log(MHI_MSG_INFO,
-			"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
+			"Wait complete state: %s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 		r = 0;
 	}
 exit:
@@ -181,8 +182,8 @@ ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
 	}
 	mhi_initiate_m0(mhi_dev_ctxt);
 	mhi_log(MHI_MSG_CRITICAL,
-		"Current mhi_state = 0x%x\n",
-		mhi_dev_ctxt->mhi_state);
+		"Current mhi_state = %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 	return count;
 }

View file

@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
 	*assigned_addr = (char *)ring->wp;
 	*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
 	      (uintptr_t)ring->base);
+
+	/* force update visible to other cores */
+	smp_wmb();
 	return 0;
 }
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
 	*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
 	      (uintptr_t)ring->base);
+
+	/* force update visible to other cores */
+	smp_wmb();
 	return 0;
 }
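
Note: the smp_wmb() orders the ring-element write against publication of the new index, so a reader that observes the updated pointer also observes the element it covers. A userspace analogue using a C11 release/acquire fence pair (names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static int slot[16];
    static atomic_uint wp;

    /* Producer: write the element, then publish the new index; the
     * release fence plays the role of smp_wmb() in the driver. */
    static void add_element(int val)
    {
            unsigned idx = atomic_load_explicit(&wp, memory_order_relaxed);

            slot[idx % 16] = val;
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&wp, idx + 1, memory_order_relaxed);
    }

    /* Consumer: read the index, fence, then read the element. */
    static int read_latest(void)
    {
            unsigned idx = atomic_load_explicit(&wp, memory_order_relaxed);

            if (idx == 0)
                    return -1;
            atomic_thread_fence(memory_order_acquire);
            return slot[(idx - 1) % 16];
    }

    int main(void)
    {
            add_element(42);
            printf("%d\n", read_latest()); /* 42 */
            return 0;
    }
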
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
 {
 	u32 chan;
 	struct mhi_device_ctxt *ctxt;
+	int bb_ring, ch_ring;

 	if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
 	    !client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
 	ctxt = client_handle->mhi_dev_ctxt;
 	chan = client_handle->chan_info.chan_nr;

-	return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+	bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
+	ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+
+	return min(bb_ring, ch_ring);
 }
 EXPORT_SYMBOL(mhi_get_free_desc);
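
Note: with bounce buffers in the path, a descriptor is only usable if both the bounce-buffer ring and the transfer ring have room, hence the min() of the two counts. A standalone sketch of free-slot arithmetic on a circular ring (one slot kept empty to tell full from empty — an assumption about get_nr_avail_ring_elements(), which is not shown here):

    #include <stdio.h>

    /* Free elements in a ring of n slots given read/write indexes. */
    static int nr_avail(unsigned rp, unsigned wp, unsigned n)
    {
            return (int)((rp + n - wp - 1) % n);
    }

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int bb_ring = nr_avail(3, 7, 16); /* bounce-buffer ring */
            int ch_ring = nr_avail(5, 7, 16); /* transfer ring */

            printf("free descriptors: %d\n", min_int(bb_ring, ch_ring));
            return 0;
    }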

View file

@@ -17,6 +17,29 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>

+static const char *state_transition_str(enum STATE_TRANSITION state)
+{
+	static const char * const mhi_states_transition_str[] = {
+		"RESET",
+		"READY",
+		"M0",
+		"M1",
+		"M2",
+		"M3",
+		"BHI",
+		"SBL",
+		"AMSS",
+		"LINK_DOWN",
+		"WAKE"
+	};
+
+	if (state == STATE_TRANSITION_SYS_ERR)
+		return "SYS_ERR";
+
+	return (state <= STATE_TRANSITION_WAKE) ?
+		mhi_states_transition_str[state] : "Invalid";
+}
+
 static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
 				   enum MHI_STATE new_state)
 {
@@ -57,7 +80,8 @@ static void conditional_chan_db_write(
 	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
 }

-static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
+			      bool reset_db_mode)
 {
 	u32 i = 0;
 	struct mhi_ring *local_ctxt = NULL;
@@ -66,11 +90,13 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
 		if (VALID_CHAN_NR(i)) {
 			local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
-			if (IS_HARDWARE_CHANNEL(i))
-				mhi_dev_ctxt->flags.db_mode[i] = 1;
-			if ((local_ctxt->wp != local_ctxt->rp) ||
-			    ((local_ctxt->wp != local_ctxt->rp) &&
-			     (local_ctxt->dir == MHI_IN)))
+
+			/* Reset the DB Mode state to DB Mode */
+			if (local_ctxt->db_mode.preserve_db_state == 0
+			    && reset_db_mode)
+				local_ctxt->db_mode.db_mode = 1;
+
+			if (local_ctxt->wp != local_ctxt->rp)
 				conditional_chan_db_write(mhi_dev_ctxt, i);
 		}
 }
@@ -150,8 +176,9 @@ static int process_m0_transition(
 			"Transitioning from M1.\n");
 	} else {
 		mhi_log(MHI_MSG_INFO,
-			"MHI State %d link state %d. Quitting\n",
-			mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
+			"MHI State %s link state %d. Quitting\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+			mhi_dev_ctxt->flags.link_up);
 	}

 	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@@ -162,7 +189,7 @@ static int process_m0_transition(
 	if (mhi_dev_ctxt->flags.mhi_initialized) {
 		ring_all_ev_dbs(mhi_dev_ctxt);
-		ring_all_chan_dbs(mhi_dev_ctxt);
+		ring_all_chan_dbs(mhi_dev_ctxt, true);
 		ring_all_cmd_dbs(mhi_dev_ctxt);
 	}
 	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
@@ -196,8 +223,8 @@ static int process_m1_transition(
 	int r = 0;

 	mhi_log(MHI_MSG_INFO,
-		"Processing M1 state transition from state %d\n",
-		mhi_dev_ctxt->mhi_state);
+		"Processing M1 state transition from state %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));

 	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 	if (!mhi_dev_ctxt->flags.pending_M3) {
@@ -444,8 +471,8 @@ static int process_reset_transition(
 						STATE_TRANSITION_RESET);
 		if (0 != r)
 			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to initiate 0x%x state trans\n",
-				STATE_TRANSITION_RESET);
+				"Failed to initiate %s state trans\n",
+				state_transition_str(STATE_TRANSITION_RESET));
 		break;
 	default:
 		mhi_log(MHI_MSG_CRITICAL,
@@ -475,8 +502,8 @@ static int process_reset_transition(
 					STATE_TRANSITION_READY);
 	if (0 != r)
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to initiate 0x%x state trans\n",
-			STATE_TRANSITION_READY);
+			"Failed to initiate %s state trans\n",
+			state_transition_str(STATE_TRANSITION_READY));
 	return r;
 }
@@ -594,7 +621,7 @@ static int process_amss_transition(
 				"Failed to set local chan state ret %d\n", r);
 			return r;
 		}
-		ring_all_chan_dbs(mhi_dev_ctxt);
+		ring_all_chan_dbs(mhi_dev_ctxt, true);
 		mhi_log(MHI_MSG_INFO,
 			"Notifying clients that MHI is enabled\n");
 		enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
@@ -608,7 +635,7 @@ static int process_amss_transition(
 					i, r);
 				return r;
 			}
-			ring_all_chan_dbs(mhi_dev_ctxt);
+			ring_all_chan_dbs(mhi_dev_ctxt, true);
 		}
 		ring_all_ev_dbs(mhi_dev_ctxt);
 		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
@ -636,8 +663,8 @@ int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
STATE_TRANSITION_RESET); STATE_TRANSITION_RESET);
if (0 != r) if (0 != r)
mhi_log(MHI_MSG_CRITICAL, mhi_log(MHI_MSG_CRITICAL,
"Failed to initiate 0x%x state trans ret %d\n", "Failed to initiate %s state trans ret %d\n",
STATE_TRANSITION_RESET, r); state_transition_str(STATE_TRANSITION_RESET), r);
mhi_log(MHI_MSG_INFO, "Exiting\n"); mhi_log(MHI_MSG_INFO, "Exiting\n");
return r; return r;
} }
@ -648,8 +675,8 @@ static int process_stt_work_item(
{ {
int r = 0; int r = 0;
mhi_log(MHI_MSG_INFO, "Transitioning to %d\n", mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
(int)cur_work_item); state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item); trace_mhi_state(cur_work_item);
switch (cur_work_item) { switch (cur_work_item) {
case STATE_TRANSITION_BHI: case STATE_TRANSITION_BHI:
@ -689,7 +716,8 @@ static int process_stt_work_item(
break; break;
default: default:
mhi_log(MHI_MSG_ERROR, mhi_log(MHI_MSG_ERROR,
"Unrecongized state: %d\n", cur_work_item); "Unrecongized state: %s\n",
state_transition_str(cur_work_item));
break; break;
} }
return r; return r;
@ -762,8 +790,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
BUG_ON(nr_avail_work_items <= 0); BUG_ON(nr_avail_work_items <= 0);
mhi_log(MHI_MSG_VERBOSE, mhi_log(MHI_MSG_VERBOSE,
"Processing state transition %x\n", "Processing state transition %s\n",
new_state); state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state; *(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item); r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r); BUG_ON(r);
@@ -778,13 +806,14 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
 	unsigned long flags;

 	mhi_log(MHI_MSG_INFO,
-		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
-		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
-		mhi_dev_ctxt->flags.pending_M3);
+		"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+		mhi_dev_ctxt->flags.pending_M0,
+		mhi_dev_ctxt->flags.pending_M3);
 	mutex_lock(&mhi_dev_ctxt->pm_lock);
 	mhi_log(MHI_MSG_INFO,
-		"Waiting for M0 M1 or M3. Currently %d...\n",
-		mhi_dev_ctxt->mhi_state);
+		"Waiting for M0 M1 or M3. Currently %s...\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));

 	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
 			mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
@@ -794,9 +823,9 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
 	switch (r) {
 	case 0:
 		mhi_log(MHI_MSG_CRITICAL,
-			"Timeout: State %d after %d ms\n",
-			mhi_dev_ctxt->mhi_state,
-			MHI_MAX_SUSPEND_TIMEOUT);
+			"Timeout: State %s after %d ms\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+			MHI_MAX_SUSPEND_TIMEOUT);
 		mhi_dev_ctxt->counters.m0_event_timeouts++;
 		r = -ETIME;
 		goto exit;
@@ -806,7 +835,8 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
 		goto exit;
 	default:
 		mhi_log(MHI_MSG_INFO,
-			"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
+			"Wait complete state: %s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 		r = 0;
 		break;
 	}
@@ -814,8 +844,8 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
 	    mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
 		mhi_assert_device_wake(mhi_dev_ctxt);
 		mhi_log(MHI_MSG_INFO,
-			"MHI state %d, done\n",
-			mhi_dev_ctxt->mhi_state);
+			"MHI state %s, done\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 		goto exit;
 	} else {
 		if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
@@ -864,9 +894,10 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 	int r = 0, abort_m3 = 0;

 	mhi_log(MHI_MSG_INFO,
-		"Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
-		mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
-		mhi_dev_ctxt->flags.pending_M3);
+		"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+		mhi_dev_ctxt->flags.pending_M0,
+		mhi_dev_ctxt->flags.pending_M3);
 	mutex_lock(&mhi_dev_ctxt->pm_lock);
 	switch (mhi_dev_ctxt->mhi_state) {
 	case MHI_STATE_RESET:
@@ -881,47 +912,53 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 	case MHI_STATE_M0:
 	case MHI_STATE_M1:
 	case MHI_STATE_M2:
-		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 		mhi_log(MHI_MSG_INFO,
 			"Triggering wake out of M2\n");
+		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 		mhi_dev_ctxt->flags.pending_M3 = 1;
 		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
 			mhi_log(MHI_MSG_INFO,
 				"M2 transition not set\n");
 			mhi_assert_device_wake(mhi_dev_ctxt);
 		}
-		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-		r = wait_event_interruptible_timeout(
+		if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
+						flags);
+			r = wait_event_interruptible_timeout(
 				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+				mhi_dev_ctxt->mhi_state == MHI_STATE_M0,
 				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-		if (0 == r || -ERESTARTSYS == r) {
-			mhi_log(MHI_MSG_CRITICAL,
-				"MDM failed to come out of M2.\n");
-			mhi_dev_ctxt->counters.m2_event_timeouts++;
-			r = -EAGAIN;
-			goto exit;
+			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+			if (0 == r || -ERESTARTSYS == r) {
+				mhi_log(MHI_MSG_CRITICAL,
+					"MDM failed to come out of M2.\n");
+				mhi_dev_ctxt->counters.m2_event_timeouts++;
+				r = -EAGAIN;
+				goto unlock;
+			}
 		}
 		break;
 	case MHI_STATE_M3:
 		mhi_log(MHI_MSG_INFO,
-			"MHI state %d, link state %d.\n",
-			mhi_dev_ctxt->mhi_state,
+			"MHI state %s, link state %d.\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
 			mhi_dev_ctxt->flags.link_up);
 		if (mhi_dev_ctxt->flags.link_up)
 			r = -EAGAIN;
 		else
 			r = 0;
 		goto exit;
 	default:
+		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 		mhi_log(MHI_MSG_INFO,
-			"MHI state %d, link state %d.\n",
-			mhi_dev_ctxt->mhi_state,
+			"MHI state %s, link state %d.\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
 			mhi_dev_ctxt->flags.link_up);
 		break;
 	}

-	while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
+	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
 		mhi_log(MHI_MSG_INFO,
 			"There are still %d acks pending from device\n",
 			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
@@ -929,25 +966,23 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 		__pm_relax(&mhi_dev_ctxt->w_lock);
 		abort_m3 = 1;
 		r = -EAGAIN;
-		goto exit;
+		goto unlock;
 	}

 	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
 		abort_m3 = 1;
 		r = -EAGAIN;
-		goto exit;
+		goto unlock;
 	}

-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 	if (mhi_dev_ctxt->flags.pending_M0) {
-		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 		r = -EAGAIN;
-		goto exit;
+		goto unlock;
 	}
 	mhi_dev_ctxt->flags.pending_M3 = 1;
 	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
 	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);

 	mhi_log(MHI_MSG_INFO,
 		"Waiting for M3 completion.\n");
 	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
@@ -970,16 +1005,20 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
 	if (r)
 		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-exit:
+	goto exit;
+
+unlock:
+	mhi_dev_ctxt->flags.pending_M3 = 0;
 	if (abort_m3) {
-		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
 		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-		ring_all_chan_dbs(mhi_dev_ctxt);
+		ring_all_chan_dbs(mhi_dev_ctxt, false);
 		ring_all_cmd_dbs(mhi_dev_ctxt);
 		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
 		mhi_deassert_device_wake(mhi_dev_ctxt);
+	} else {
+		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 	}
+
+exit:
 	mhi_dev_ctxt->flags.pending_M3 = 0;
 	mutex_unlock(&mhi_dev_ctxt->pm_lock);
 	return r;
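
Note: the rework gives mhi_initiate_m3() two tails: error paths taken with xfer_lock held jump to unlock:, which clears pending_M3 and drops the lock (re-ringing the doorbells if the suspend aborted), while exit: runs with the lock already released. A compact sketch of the two-label idiom (pthreads stand in for the kernel primitives; names illustrative):

    #include <pthread.h>

    static pthread_mutex_t xfer_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending_m3;

    /* Two-tail error handling: "unlock" paths still hold the lock,
     * "exit" paths do not. */
    static int initiate_m3(int acks_pending)
    {
            int r = 0;

            pthread_mutex_lock(&xfer_lock);
            pending_m3 = 1;
            if (acks_pending) {
                    r = -1; /* -EAGAIN analogue */
                    goto unlock;
            }
            pthread_mutex_unlock(&xfer_lock);
            /* ... wait for M3 completion, lock no longer held ... */
            goto exit;

    unlock:
            pending_m3 = 0;
            pthread_mutex_unlock(&xfer_lock);
    exit:
            pending_m3 = 0;
            return r;
    }

    int main(void)
    {
            return initiate_m3(0);
    }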

View file

@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
 module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");

+const char * const mhi_states_str[MHI_STATE_LIMIT] = {
+	"RESET",
+	"READY",
+	"M0",
+	"M1",
+	"M2",
+	"M3",
+	"Reserved: 0x06",
+	"BHI",
+	"SYS_ERR",
+};
+
 static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
 				   size_t count, loff_t *offp)
 {
@@ -225,9 +237,9 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
 	amnt_copied =
 		scnprintf(mhi_dev_ctxt->chan_info,
 			  MHI_LOG_SIZE,
-			  "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
+			  "%s %s %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d\n",
 			  "Our State:",
-			  mhi_dev_ctxt->mhi_state,
+			  TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
 			  "M0->M1:",
 			  mhi_dev_ctxt->counters.m0_m1,
 			  "M0<-M1:",
@@ -244,10 +256,6 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
 			  mhi_dev_ctxt->counters.m3_event_timeouts,
 			  "M0_ev_TO:",
 			  mhi_dev_ctxt->counters.m0_event_timeouts,
-			  "MSI_d:",
-			  mhi_dev_ctxt->counters.msi_disable_cntr,
-			  "MSI_e:",
-			  mhi_dev_ctxt->counters.msi_enable_cntr,
 			  "outstanding_acks:",
 			  atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
 			  "LPM:",

View file

@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
 		"[%s] " _msg, __func__, ##__VA_ARGS__); \
 	} while (0)

+extern const char * const mhi_states_str[MHI_STATE_LIMIT];
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
+				 "INVALID_STATE" : mhi_states_str[state])
+
 irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);

 struct mhi_meminfo {
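
Note: TO_MHI_STATE_STR() bounds-checks before indexing mhi_states_str[], so a corrupt or out-of-range state prints "INVALID_STATE" rather than reading past the table. A standalone sketch of the same macro shape:

    #include <stdio.h>

    enum mhi_state { STATE_RESET, STATE_READY, STATE_M0, STATE_LIMIT };

    static const char * const states_str[STATE_LIMIT] = {
            "RESET", "READY", "M0",
    };

    /* Bounds check first, then index: same shape as TO_MHI_STATE_STR. */
    #define TO_STATE_STR(s) \
            (((unsigned)(s) >= STATE_LIMIT) ? "INVALID_STATE" : states_str[(s)])

    int main(void)
    {
            printf("%s\n", TO_STATE_STR(STATE_M0)); /* M0 */
            printf("%s\n", TO_STATE_STR(42));       /* INVALID_STATE */
            return 0;
    }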

View file

@ -63,9 +63,10 @@ enum MHI_CLIENT_CHANNEL {
MHI_CLIENT_RESERVED_1_UPPER = 99, MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100, MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101, MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102, MHI_CLIENT_IP_HW_ADPL_IN = 102,
MHI_CLIENT_RESERVED_2_LOWER = 103,
MHI_CLIENT_RESERVED_2_UPPER = 127, MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102 MHI_MAX_CHANNELS = 103
}; };
enum MHI_CB_REASON { enum MHI_CB_REASON {