Merge "mhi: core: power management redesign"
This commit is contained in:
commit
22c277e625
14 changed files with 1342 additions and 1484 deletions
@@ -20,6 +20,7 @@
 #include <linux/completion.h>
 #include <linux/atomic.h>
 #include <linux/spinlock.h>
+#include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/cdev.h>
 #include <linux/msm_pcie.h>
@@ -29,6 +30,7 @@
 #include <linux/dma-mapping.h>
 
 extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;
 
 enum MHI_DEBUG_LEVEL {
 	MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
 	MHI_STATE_reserved = 0x80000000
 };
 
+enum MHI_BRSTMODE {
+	/* BRST Mode Enable for HW Channels, SW Channel Disabled */
+	MHI_BRSTMODE_DEFAULT = 0x0,
+	MHI_BRSTMODE_RESERVED = 0x1,
+	MHI_BRSTMODE_DISABLE = 0x2,
+	MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+	MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+	MHI_PM_POR = 0x1, /* Power On Reset State */
+	MHI_PM_M0 = 0x2,
+	MHI_PM_M1 = 0x4,
+	MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+	MHI_PM_M2 = 0x10,
+	MHI_PM_M3_ENTER = 0x20,
+	MHI_PM_M3 = 0x40,
+	MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+						MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+						(pm_state < MHI_PM_M3_EXIT))
 struct __packed mhi_event_ctxt {
 	u32 mhi_intmodt;
 	u32 mhi_event_er_type;
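Note: the MHI_PM_STATE values are one-hot bits, so the access-validity checks added above reduce to single mask tests. A minimal sketch of how a caller is expected to use them under the new pm_xfer_lock (the function itself is illustrative, not part of the patch; the -EACCES return is an assumption for the example):

	/* Illustrative only: gate a doorbell write on the current PM state. */
	static int example_ring_db(struct mhi_device_ctxt *mhi_dev_ctxt,
				   void __iomem *io_addr, uintptr_t chan, u32 val)
	{
		int ret = 0;

		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
			/* Only M0/M1 allow doorbell access; bail out otherwise */
			ret = -EACCES;
		} else {
			mhi_dev_ctxt->mhi_local_chan_ctxt[chan].db_mode.process_db(
					mhi_dev_ctxt, io_addr, chan, val);
		}
		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		return ret;
	}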
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
 };
 
 struct __packed mhi_chan_ctxt {
-	enum MHI_CHAN_STATE mhi_chan_state;
-	enum MHI_CHAN_DIR mhi_chan_type;
+	u32 chstate : 8;
+	u32 brstmode : 2;
+	u32 pollcfg : 6;
+	u32 reserved : 16;
+	u32 chtype;
 	u32 mhi_event_ring_index;
 	u64 mhi_trb_ring_base_addr;
 	u64 mhi_trb_ring_len;
@@ -172,7 +202,6 @@ enum MHI_PKT_TYPE {
 	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
 	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
 	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
-	MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
 	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
 	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
 	MHI_PKT_TYPE_TX_EVENT = 0x22,
@@ -265,6 +294,11 @@ struct db_mode {
 	/* if set do not reset DB_Mode during M0 resume */
 	u32 preserve_db_state : 1;
 	u32 db_mode : 1;
+	enum MHI_BRSTMODE brstmode;
+	void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+			   void __iomem *io_addr,
+			   uintptr_t chan,
+			   u32 val);
 };
 
 struct mhi_ring {
@@ -279,6 +313,7 @@ struct mhi_ring {
 	struct db_mode db_mode;
 	u32 msi_disable_cntr;
 	u32 msi_enable_cntr;
+	spinlock_t ring_lock;
 };
 
 enum MHI_CMD_STATUS {
@@ -336,12 +371,19 @@ struct mhi_chan_info {
 	u32 flags;
 };
 
+struct mhi_chan_cfg {
+	enum MHI_COMMAND current_cmd;
+	struct mutex chan_lock;
+	spinlock_t event_lock; /* completion event lock */
+	struct completion cmd_complete;
+	struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+	union mhi_cmd_pkt cmd_pkt;
+};
+
 struct mhi_client_handle {
 	struct mhi_chan_info chan_info;
 	struct mhi_device_ctxt *mhi_dev_ctxt;
 	struct mhi_client_info_t client_info;
-	struct completion chan_reset_complete;
-	struct completion chan_open_complete;
 	void *user_data;
 	struct mhi_result result;
 	u32 device_index;
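Note: the new per-channel mhi_chan_cfg replaces the global command-ack bookkeeping. A command issuer sleeps on cmd_complete and the event thread publishes the completion packet under event_lock before signalling (see the mhi_process_event_ring hunk later in this patch). A hedged sketch of the issuing side, assuming cmd_complete was set up with init_completion(); the 1000 ms timeout is illustrative:

	/* Illustrative only: wait for a channel command to complete. */
	static int example_wait_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
	{
		struct mhi_chan_cfg *cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
		struct mhi_cmd_complete_event_pkt ev_pkt;
		unsigned long flags;

		if (!wait_for_completion_timeout(&cfg->cmd_complete,
						 msecs_to_jiffies(1000)))
			return -ETIMEDOUT;

		/* Snapshot the completion event published by the event thread */
		spin_lock_irqsave(&cfg->event_lock, flags);
		ev_pkt = cfg->cmd_event_pkt;
		spin_unlock_irqrestore(&cfg->event_lock, flags);

		return 0;
	}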
@@ -378,43 +420,27 @@ struct mhi_buf_info {
 
 struct mhi_counters {
 	u32 m0_m1;
-	u32 m1_m0;
 	u32 m1_m2;
 	u32 m2_m0;
 	u32 m0_m3;
-	u32 m3_m0;
 	u32 m1_m3;
-	u32 mhi_reset_cntr;
-	u32 mhi_ready_cntr;
-	u32 m3_event_timeouts;
-	u32 m0_event_timeouts;
-	u32 m2_event_timeouts;
-	u32 nr_irq_migrations;
-	u32 *msi_counter;
-	u32 *ev_counter;
-	atomic_t outbound_acks;
+	u32 m3_m0;
 	u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
 	u32 bb_used[MHI_MAX_CHANNELS];
+	atomic_t device_wake;
+	atomic_t outbound_acks;
+	atomic_t events_pending;
+	u32 *msi_counter;
+	u32 mhi_reset_cntr;
 };
 
 struct mhi_flags {
 	u32 mhi_initialized;
-	u32 pending_M3;
-	u32 pending_M0;
 	u32 link_up;
-	u32 kill_threads;
-	atomic_t data_pending;
-	atomic_t events_pending;
-	atomic_t pending_resume;
-	atomic_t pending_ssr;
-	atomic_t pending_powerup;
-	atomic_t m2_transition;
 	int stop_threads;
-	atomic_t device_wake;
-	u32 ssr;
+	u32 kill_threads;
 	u32 ev_thread_stopped;
 	u32 st_thread_stopped;
-	u32 uldl_enabled;
 };
 
 struct mhi_wait_queues {
@@ -458,44 +484,35 @@ struct mhi_dev_space {
 };
 
 struct mhi_device_ctxt {
-	enum MHI_STATE mhi_state;
+	enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+	enum MHI_STATE mhi_state; /* protocol state */
 	enum MHI_EXEC_ENV dev_exec_env;
 
 	struct mhi_dev_space dev_space;
 	struct mhi_pcie_dev_info *dev_info;
 	struct pcie_core_info *dev_props;
 	struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
 
 	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
 
 	struct mhi_ring *mhi_local_event_ctxt;
 	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+	struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
 
-	struct mutex *mhi_chan_mutex;
-	struct mutex mhi_link_state;
-	spinlock_t *mhi_ev_spinlock_list;
-	struct mutex *mhi_cmd_mutex_list;
 	struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
 	struct mhi_event_ring_cfg *ev_ring_props;
 	struct task_struct *event_thread_handle;
 	struct task_struct *st_thread_handle;
+	struct tasklet_struct ev_task; /* Process control Events */
+	struct work_struct process_m1_worker;
 	struct mhi_wait_queues mhi_ev_wq;
 	struct dev_mmio_info mmio_info;
 
-	u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
-	u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
-	spinlock_t *db_write_lock;
 
 	struct mhi_state_work_queue state_change_work_item_list;
-	enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
 
-	u32 cmd_ring_order;
 	struct mhi_counters counters;
 	struct mhi_flags flags;
 
-	u32 device_wake_asserted;
-
-	rwlock_t xfer_lock;
 	struct hrtimer m1_timer;
 	ktime_t m1_timeout;
 
@@ -508,11 +525,12 @@ struct mhi_device_ctxt {
 
 	unsigned long esoc_notif;
 	enum STATE_TRANSITION base_state;
-	atomic_t outbound_acks;
+	rwlock_t pm_xfer_lock; /* lock to control PM State */
+	spinlock_t dev_wake_lock; /* lock to set wake bit */
 	struct mutex pm_lock;
 	struct wakeup_source w_lock;
 
-	int enable_lpm;
 	char *chan_info;
 	struct dentry *mhi_parent_folder;
 };
@@ -578,7 +596,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       u32 event_ring,
 		       struct mhi_ring *ring,
 		       enum MHI_CHAN_STATE chan_state,
-		       bool preserve_db_state);
+		       bool preserve_db_state,
+		       enum MHI_BRSTMODE brstmode);
 int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
 				   u32 chan);
@@ -623,8 +642,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
 			enum MHI_CB_REASON reason);
 void mhi_notify_client(struct mhi_client_handle *client_handle,
 		       enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+			    bool force_set);
 int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
 			void *hcpu);
@@ -636,6 +656,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
 			int index);
 int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+			     void __iomem *io_addr,
+			     uintptr_t chan,
+			     u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+				     void __iomem *io_addr,
+				     uintptr_t chan,
+				     u32 val);
 void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
 		    uintptr_t io_offset, u32 val);
 void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -652,12 +680,19 @@ int mhi_runtime_suspend(struct device *dev);
 int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
 		   struct mhi_chan_info *chan_info);
 int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
 int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
 		  enum MHI_TYPE_EVENT_RING type);
 void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
 		       int index);
 void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+		     enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
 
 #endif
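Note: the wake-vote API changes shape here; assert/deassert no longer return a status, and assert takes a force flag. The calling convention the rest of the patch uses (this isolated sketch mirrors the pattern in mhi_process_event_ring below; the function name is illustrative):

	/* Illustrative only: vote/unvote device wake around an I/O burst. */
	static void example_io_burst(struct mhi_device_ctxt *mhi_dev_ctxt)
	{
		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		mhi_assert_device_wake(mhi_dev_ctxt, false); /* false: no forced set */
		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);

		/* ... submit transfer ring elements here ... */

		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		mhi_deassert_device_wake(mhi_dev_ctxt);
		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
	}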
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
 	size_t amount_copied = 0;
 	uintptr_t align_len = 0x1000;
 	u32 tx_db_val = 0;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+	const long bhi_timeout_ms = 1000;
+	long timeout;
 
 	if (buf == NULL || 0 == count)
 		return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
 	if (count > BHI_MAX_IMAGE_SIZE)
 		return -ENOMEM;
 
-	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-				 mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+	timeout = wait_event_interruptible_timeout(
+				*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+				mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+				msecs_to_jiffies(bhi_timeout_ms));
+	if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+		return -EIO;
 
 	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
 
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
 	bhi_ctxt->image_size = count;
 
 	/* Write the image size */
+	read_lock_bh(pm_xfer_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_bh(pm_xfer_lock);
+		goto bhi_copy_error;
+	}
 	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
 	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
 			    BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
 			    BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
 
 	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
+	read_unlock_bh(pm_xfer_lock);
 	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
 		u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
 
+		read_lock_bh(pm_xfer_lock);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_bh(pm_xfer_lock);
+			goto bhi_copy_error;
+		}
 		err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
 		errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
 		errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
 				BHI_STATUS,
 				BHI_STATUS_MASK,
 				BHI_STATUS_SHIFT);
+		read_unlock_bh(pm_xfer_lock);
 		mhi_log(MHI_MSG_CRITICAL,
 			"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
 			tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
 	    || 0 == mhi_pcie_device->core.bar0_end)
 		return -EIO;
 
-	mhi_log(MHI_MSG_INFO,
-		"Successfully registered char dev. bhi base is: 0x%p.\n",
-		bhi_ctxt->bhi_base);
 	ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
 	if (IS_ERR_VALUE(ret_val)) {
 		mhi_log(MHI_MSG_CRITICAL,
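Note: wait_event_interruptible_timeout() returns 0 on timeout, a negative value on signal, and the remaining jiffies otherwise, so the re-check of mhi_state above is what distinguishes a late-but-successful BHI transition from a real failure. The same pattern in isolation (names are illustrative):

	/* Illustrative only: timed wait with a post-wait condition re-check. */
	static int example_wait_for_state(wait_queue_head_t *wq, bool *cond_flag,
					  long timeout_ms)
	{
		long ret = wait_event_interruptible_timeout(*wq, *cond_flag,
						msecs_to_jiffies(timeout_ms));

		/* ret <= 0 covers both timeout (0) and interruption (< 0) */
		if (ret <= 0 && !*cond_flag)
			return -EIO;
		return 0;
	}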
@@ -89,32 +89,31 @@ dt_error:
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int r = 0;
+	int i;
 
 	mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
 					mhi_dev_ctxt->mmio_info.nr_event_rings,
 					GFP_KERNEL);
 
 	if (!mhi_dev_ctxt->mhi_local_event_ctxt)
 		return -ENOMEM;
 
-	mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
-					mhi_dev_ctxt->mmio_info.nr_event_rings,
-					GFP_KERNEL);
-	if (!mhi_dev_ctxt->counters.ev_counter) {
-		r = -ENOMEM;
-		goto free_local_ec_list;
-	}
 	mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
 					mhi_dev_ctxt->mmio_info.nr_event_rings,
 					GFP_KERNEL);
 	if (!mhi_dev_ctxt->counters.msi_counter) {
 		r = -ENOMEM;
-		goto free_ev_counter;
+		goto free_local_ec_list;
 	}
 
+	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+		struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+						mhi_local_event_ctxt[i];
+
+		spin_lock_init(&mhi_ring->ring_lock);
+	}
+
 	return r;
 
-free_ev_counter:
-	kfree(mhi_dev_ctxt->counters.ev_counter);
 free_local_ec_list:
 	kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
 	return r;
@@ -129,13 +128,18 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
 	db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
 				event_ring_index,
 				(uintptr_t) event_ctxt->wp);
-	mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
-		       event_ring_index, db_value);
+	event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+				       mhi_dev_ctxt->mmio_info.event_db_addr,
+				       event_ring_index,
+				       db_value);
 }
 
 static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
-			       struct mhi_ring *ring, u32 el_per_ring,
-			       u32 intmodt_val, u32 msi_vec)
+			       struct mhi_ring *ring,
+			       u32 el_per_ring,
+			       u32 intmodt_val,
+			       u32 msi_vec,
+			       enum MHI_BRSTMODE brstmode)
 {
 	ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
 	ev_list->mhi_msi_vector = msi_vec;
@@ -144,6 +148,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
 	ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
 	ring->el_size = sizeof(union mhi_event_pkt);
 	ring->overwrite_en = 0;
+
+	ring->db_mode.db_mode = 1;
+	ring->db_mode.brstmode = brstmode;
+	switch (ring->db_mode.brstmode) {
+	case MHI_BRSTMODE_ENABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode;
+		break;
+	case MHI_BRSTMODE_DISABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+		break;
+	default:
+		ring->db_mode.process_db = mhi_process_db;
+	}
+
 	/* Flush writes to MMIO */
 	wmb();
 	return 0;
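Note: burst-mode handling is resolved once at ring init; the brstmode value picks the doorbell routine, and every later doorbell ring is a single indirect call with no per-doorbell branching. A minimal sketch of the call site, using the fields set above (function name is illustrative):

	/* Illustrative only: ring a doorbell through the pre-selected routine. */
	static void example_ring_doorbell(struct mhi_device_ctxt *mhi_dev_ctxt,
					  struct mhi_ring *ring,
					  void __iomem *io_addr,
					  uintptr_t chan, u32 db_value)
	{
		/* process_db was chosen from brstmode at init time */
		ring->db_mode.process_db(mhi_dev_ctxt, io_addr, chan, db_value);
	}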
@@ -159,9 +177,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
 		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
 		mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
 		mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
 				    mhi_dev_ctxt->ev_ring_props[i].nr_desc,
 				    mhi_dev_ctxt->ev_ring_props[i].intmod,
-				    mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+				    mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+				    GET_EV_PROPS(EV_BRSTMODE,
+						 mhi_dev_ctxt->
+						 ev_ring_props[i].flags));
 	}
 }
 
@@ -219,10 +240,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
 	u32 i = 0;
 	unsigned long flags = 0;
 	int ret_val = 0;
-	spinlock_t *lock =
-		&mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
 	struct mhi_ring *event_ctxt =
 		&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+	spinlock_t *lock = &event_ctxt->ring_lock;
 
 	if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
 		mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
 			"Failed to register with esoc ret %d.\n",
 			ret_val);
 	}
-	mhi_pcie_dev->mhi_ctxt.bus_scale_table =
-				msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
-	mhi_pcie_dev->mhi_ctxt.bus_client =
-				msm_bus_scale_register_client(
-				mhi_pcie_dev->mhi_ctxt.bus_scale_table);
-	if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Could not register for bus control ret: %d.\n",
-			mhi_pcie_dev->mhi_ctxt.bus_client);
-	} else {
-		ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
-		if (ret_val)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Could not set bus frequency ret: %d\n",
-				ret_val);
-	}
 
 	device_disable_async_suspend(&pcie_device->dev);
 	ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
 			  mhi_dev_ctxt->dev_space.dev_mem_len,
 			  mhi_dev_ctxt->dev_space.dev_mem_start,
 			  mhi_dev_ctxt->dev_space.dma_dev_mem_start);
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
 	kfree(mhi_dev_ctxt->ev_ring_props);
 	mhi_rem_pm_sysfs(&pcie_device->dev);
 sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
 }
 
 static const struct dev_pm_ops pm_ops = {
-	SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+	SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+			   mhi_runtime_resume,
+			   mhi_runtime_idle)
 	SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
 };
 
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
 };
 
 static int mhi_pci_probe(struct pci_dev *pcie_device,
			 const struct pci_device_id *mhi_device_id)
 {
 	int ret_val = 0;
 	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
 	struct platform_device *plat_dev;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
 	u32 nr_dev = mhi_devices.nr_of_devices;
 
-	mhi_log(MHI_MSG_INFO, "Entering.\n");
+	mhi_log(MHI_MSG_INFO, "Entering\n");
 	mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
 	if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
 		mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
 	mhi_devices.nr_of_devices++;
 	plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
 	pcie_device->dev.of_node = plat_dev->dev.of_node;
-	pm_runtime_put_noidle(&pcie_device->dev);
+	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+	INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+	mutex_init(&mhi_dev_ctxt->pm_lock);
+	rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+	spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+	tasklet_init(&mhi_dev_ctxt->ev_task,
+		     mhi_ctrl_ev_task,
+		     (unsigned long)mhi_dev_ctxt);
+
+	mhi_dev_ctxt->flags.link_up = 1;
+	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
 	mhi_pcie_dev->pcie_device = pcie_device;
 	mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
 	mhi_pcie_dev->mhi_pci_link_event.events =
-		(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
-		 MSM_PCIE_EVENT_WAKEUP);
+		(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
 	mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
 	mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
 	mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
 	ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
-	if (ret_val)
+	if (ret_val) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failed to register for link notifications %d.\n",
 			ret_val);
+		return ret_val;
+	}
+
+	/* Initialize MHI CNTXT */
+	ret_val = mhi_ctxt_init(mhi_pcie_dev);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"MHI Initialization failed, ret %d\n",
+			ret_val);
+		goto deregister_pcie;
+	}
+	pci_set_master(mhi_pcie_dev->pcie_device);
+
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+	ret_val = set_mhi_base_state(mhi_pcie_dev);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Error Setting MHI Base State %d\n", ret_val);
+		goto unlock_pm_lock;
+	}
+
+	if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+		ret_val = bhi_probe(mhi_pcie_dev);
+		if (ret_val) {
+			mhi_log(MHI_MSG_ERROR,
+				"Error with bhi_probe ret:%d", ret_val);
+			goto unlock_pm_lock;
+		}
+	}
+
+	init_mhi_base_state(mhi_dev_ctxt);
+
+	pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+					 MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+	pm_runtime_use_autosuspend(&pcie_device->dev);
+	pm_suspend_ignore_children(&pcie_device->dev, true);
+
+	/*
+	 * pci framework will increment usage count (twice) before
+	 * calling local device driver probe function.
+	 * 1st pci.c pci_pm_init() calls pm_runtime_forbid
+	 * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
+	 * Framework expect pci device driver to call pm_runtime_put_noidle
+	 * to decrement usage count after successful probe and
+	 * call pm_runtime_allow to enable runtime suspend.
+	 * MHI will allow runtime after entering AMSS state.
+	 */
+	pm_runtime_mark_last_busy(&pcie_device->dev);
+	pm_runtime_put_noidle(&pcie_device->dev);
+
+	/*
+	 * Keep the MHI state in Active (M0) state until AMSS because EP
+	 * would error fatal if we try to enter M1 before entering
+	 * AMSS state.
+	 */
+	read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+	return 0;
+
+unlock_pm_lock:
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+	msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
 	return ret_val;
 }
 
 static int mhi_plat_probe(struct platform_device *pdev)
 {
 	u32 nr_dev = mhi_devices.nr_of_devices;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
 	int r = 0;
 
 	mhi_log(MHI_MSG_INFO, "Entered\n");
+	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+	mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!mhi_dev_ctxt->bus_scale_table)
+		return -ENODATA;
+	mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+					(mhi_dev_ctxt->bus_scale_table);
+	if (!mhi_dev_ctxt->bus_client)
+		return -EINVAL;
+
 	mhi_devices.device_list[nr_dev].plat_dev = pdev;
 	r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
 	if (r)
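Note: the probe comment above encodes the runtime-PM contract: the PCI core takes two usage-count references before probe, the driver drops one with pm_runtime_put_noidle(), and pm_runtime_allow() is deferred until the device reaches AMSS. A hedged sketch of the later "allow" half (the function name is illustrative; the real call site lives in state-machine code not shown in this view):

	/* Illustrative only: enable runtime PM once the device reaches AMSS. */
	static void example_on_amss_entered(struct pci_dev *pcie_device)
	{
		/* Lift the pm_runtime_forbid() reference the PCI core took */
		pm_runtime_mark_last_busy(&pcie_device->dev);
		pm_runtime_allow(&pcie_device->dev);
	}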
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int i;
 
-	mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
-					mhi_dev_ctxt->mmio_info.nr_event_rings,
-					GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
-		goto ev_mutex_free;
-	mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
-					MHI_MAX_CHANNELS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
-		goto chan_mutex_free;
-	mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
-					NR_OF_CMD_RINGS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
-		goto cmd_mutex_free;
-	mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
-					MHI_MAX_CHANNELS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->db_write_lock)
-		goto db_write_lock_free;
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
-	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
-		spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
-	for (i = 0; i < NR_OF_CMD_RINGS; ++i)
-		mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
-	rwlock_init(&mhi_dev_ctxt->xfer_lock);
-	mutex_init(&mhi_dev_ctxt->mhi_link_state);
-	mutex_init(&mhi_dev_ctxt->pm_lock);
-	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
+	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
+
+		mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+		spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+		spin_lock_init(&ring->ring_lock);
+	}
+
+	for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+		spin_lock_init(&ring->ring_lock);
+	}
 	return 0;
-
-db_write_lock_free:
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
-	return -ENOMEM;
 }
 
 size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
 	chan_ctxt->mhi_trb_write_ptr = p_base_addr;
 	chan_ctxt->mhi_trb_ring_len = len;
 	/* Prepulate the channel ctxt */
-	chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+	chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
 	chan_ctxt->mhi_event_ring_index = ev_index;
 }
 
@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
 	ring[PRIMARY_CMD_RING].len = ring_size;
 	ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
 	ring[PRIMARY_CMD_RING].overwrite_en = 0;
+	ring[PRIMARY_CMD_RING].db_mode.process_db =
+					mhi_process_db_brstmode_disable;
 	return 0;
 }
 
@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
 	}
 	init_event_ctxt_array(mhi_dev_ctxt);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	mhi_dev_ctxt->enable_lpm = 1;
 
 	r = mhi_spawn_threads(mhi_dev_ctxt);
 	if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
 			  mhi_dev_ctxt->dev_space.dma_dev_mem_start);
 error_during_dev_mem_init:
 error_during_local_ev_ctxt:
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
 error_during_sync:
 	kfree(mhi_dev_ctxt->ev_ring_props);
 error_during_props:
@@ -601,10 +574,12 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
 		       u32 event_ring, struct mhi_ring *ring,
 		       enum MHI_CHAN_STATE chan_state,
-		       bool preserve_db_state)
+		       bool preserve_db_state,
+		       enum MHI_BRSTMODE brstmode)
 {
-	cc_list->mhi_chan_state = chan_state;
-	cc_list->mhi_chan_type = chan_type;
+	cc_list->brstmode = brstmode;
+	cc_list->chstate = chan_state;
+	cc_list->chtype = chan_type;
 	cc_list->mhi_event_ring_index = event_ring;
 	cc_list->mhi_trb_ring_base_addr = trb_list_phy;
 	cc_list->mhi_trb_ring_len =
@@ -621,6 +596,19 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 	ring->dir = chan_type;
 	ring->db_mode.db_mode = 1;
 	ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+	ring->db_mode.brstmode = brstmode;
+
+	switch (ring->db_mode.brstmode) {
+	case MHI_BRSTMODE_ENABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode;
+		break;
+	case MHI_BRSTMODE_DISABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+		break;
+	default:
+		ring->db_mode.process_db = mhi_process_db;
+	}
+
 	/* Flush writes to MMIO */
 	wmb();
 	return 0;
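Note: the redesign retires the four separately kmalloc'd lock arrays in favor of locks embedded in the objects they protect, which removes an allocation failure path entirely (mhi_init_sync can no longer fail). A hedged sketch of the resulting acquire pattern (field names as in the structs earlier in this patch; the function is illustrative):

	/* Illustrative only: per-ring locking after the redesign. */
	static void example_touch_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
				       u32 chan)
	{
		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
		unsigned long flags;

		spin_lock_irqsave(&ring->ring_lock, flags);
		/* ... manipulate ring read/write pointers ... */
		spin_unlock_irqrestore(&ring->ring_lock, flags);
	}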
@ -15,57 +15,6 @@
|
||||||
#include "mhi_sys.h"
|
#include "mhi_sys.h"
|
||||||
#include "mhi_trace.h"
|
#include "mhi_trace.h"
|
||||||
|
|
||||||
irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
|
|
||||||
{
|
|
||||||
struct device *mhi_device = dev_id;
|
|
||||||
struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
|
|
||||||
|
|
||||||
if (!mhi_dev_ctxt) {
|
|
||||||
mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
|
|
||||||
return IRQ_HANDLED;
|
|
||||||
}
|
|
||||||
mhi_dev_ctxt->counters.msi_counter[
|
|
||||||
IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
|
|
||||||
mhi_log(MHI_MSG_VERBOSE,
|
|
||||||
"Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
|
|
||||||
trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
|
|
||||||
atomic_inc(&mhi_dev_ctxt->flags.events_pending);
|
|
||||||
wake_up_interruptible(
|
|
||||||
mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
|
|
||||||
return IRQ_HANDLED;
|
|
||||||
}
|
|
||||||
|
|
||||||
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
|
|
||||||
{
|
|
||||||
struct device *mhi_device = dev_id;
|
|
||||||
u32 client_index;
|
|
||||||
struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
|
|
||||||
struct mhi_client_handle *client_handle;
|
|
||||||
struct mhi_client_info_t *client_info;
|
|
||||||
struct mhi_cb_info cb_info;
|
|
||||||
int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
|
|
||||||
|
|
||||||
mhi_dev_ctxt->counters.msi_counter[msi_num]++;
|
|
||||||
mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
|
|
||||||
trace_mhi_msi(msi_num);
|
|
||||||
client_index = MHI_MAX_CHANNELS -
|
|
||||||
(mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
|
|
||||||
client_handle = mhi_dev_ctxt->client_handle_list[client_index];
|
|
||||||
client_info = &client_handle->client_info;
|
|
||||||
if (likely(NULL != client_handle)) {
|
|
||||||
client_handle->result.user_data =
|
|
||||||
client_handle->user_data;
|
|
||||||
if (likely(NULL != &client_info->mhi_client_cb)) {
|
|
||||||
cb_info.result = &client_handle->result;
|
|
||||||
cb_info.cb_reason = MHI_CB_XFER;
|
|
||||||
cb_info.chan = client_handle->chan_info.chan_nr;
|
|
||||||
cb_info.result->transaction_status = 0;
|
|
||||||
client_info->mhi_client_cb(&cb_info);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return IRQ_HANDLED;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int mhi_process_event_ring(
|
static int mhi_process_event_ring(
|
||||||
struct mhi_device_ctxt *mhi_dev_ctxt,
|
struct mhi_device_ctxt *mhi_dev_ctxt,
|
||||||
u32 ev_index,
|
u32 ev_index,
|
||||||
|
@ -76,12 +25,17 @@ static int mhi_process_event_ring(
|
||||||
union mhi_event_pkt event_to_process;
|
union mhi_event_pkt event_to_process;
|
||||||
int ret_val = 0;
|
int ret_val = 0;
|
||||||
struct mhi_event_ctxt *ev_ctxt = NULL;
|
struct mhi_event_ctxt *ev_ctxt = NULL;
|
||||||
union mhi_cmd_pkt *cmd_pkt = NULL;
|
|
||||||
union mhi_event_pkt *ev_ptr = NULL;
|
|
||||||
struct mhi_ring *local_ev_ctxt =
|
struct mhi_ring *local_ev_ctxt =
|
||||||
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
|
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
|
||||||
u32 event_code;
|
|
||||||
|
|
||||||
|
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
|
if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
|
||||||
|
mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
|
||||||
|
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
|
return -EIO;
|
||||||
|
}
|
||||||
|
mhi_assert_device_wake(mhi_dev_ctxt, false);
|
||||||
|
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
|
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
|
||||||
|
|
||||||
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
|
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
|
||||||
|
@ -96,59 +50,84 @@ static int mhi_process_event_ring(
|
||||||
|
|
||||||
while ((local_rp != device_rp) && (event_quota > 0) &&
|
while ((local_rp != device_rp) && (event_quota > 0) &&
|
||||||
(device_rp != NULL) && (local_rp != NULL)) {
|
(device_rp != NULL) && (local_rp != NULL)) {
|
||||||
|
|
||||||
event_to_process = *local_rp;
|
event_to_process = *local_rp;
|
||||||
ev_ptr = &event_to_process;
|
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
event_code = get_cmd_pkt(mhi_dev_ctxt,
|
recycle_trb_and_ring(mhi_dev_ctxt,
|
||||||
ev_ptr, &cmd_pkt, ev_index);
|
local_ev_ctxt,
|
||||||
if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
|
MHI_RING_TYPE_EVENT_RING,
|
||||||
MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
|
ev_index);
|
||||||
(event_code == MHI_EVENT_CC_SUCCESS)) {
|
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
mhi_log(MHI_MSG_INFO, "Command Completion event\n");
|
|
||||||
if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
|
switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
|
||||||
MHI_PKT_TYPE_RESET_CHAN_CMD)) {
|
|
||||||
mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
|
|
||||||
MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
|
|
||||||
MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
|
|
||||||
ret_val = -EINPROGRESS;
|
|
||||||
break;
|
|
||||||
} else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
|
|
||||||
== MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
|
|
||||||
MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
|
|
||||||
MHI_PKT_TYPE_RESET_CHAN_CMD);
|
|
||||||
mhi_log(MHI_MSG_INFO,
|
|
||||||
"Processing Reset CC event\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
|
|
||||||
local_ev_ctxt,
|
|
||||||
MHI_RING_TYPE_EVENT_RING,
|
|
||||||
ev_index)))
|
|
||||||
mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
|
|
||||||
switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
|
|
||||||
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
|
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
|
||||||
mhi_log(MHI_MSG_INFO,
|
{
|
||||||
"MHI CCE received ring 0x%x\n",
|
union mhi_cmd_pkt *cmd_pkt;
|
||||||
ev_index);
|
u32 chan;
|
||||||
|
struct mhi_chan_cfg *cfg;
|
||||||
|
unsigned long flags;
|
||||||
|
struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
|
||||||
|
mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
|
||||||
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
|
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
|
||||||
__pm_relax(&mhi_dev_ctxt->w_lock);
|
__pm_relax(&mhi_dev_ctxt->w_lock);
|
||||||
ret_val = parse_cmd_event(mhi_dev_ctxt,
|
get_cmd_pkt(mhi_dev_ctxt,
|
||||||
&event_to_process, ev_index);
|
&event_to_process,
|
||||||
|
&cmd_pkt, ev_index);
|
||||||
|
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
|
||||||
|
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
|
||||||
|
mhi_log(MHI_MSG_INFO,
|
||||||
|
"MHI CCE received ring 0x%x chan:%u\n",
|
||||||
|
ev_index,
|
||||||
|
chan);
|
||||||
|
spin_lock_irqsave(&cfg->event_lock, flags);
|
||||||
|
cfg->cmd_pkt = *cmd_pkt;
|
||||||
|
cfg->cmd_event_pkt =
|
||||||
|
event_to_process.cmd_complete_event_pkt;
|
||||||
|
complete(&cfg->cmd_complete);
|
||||||
|
spin_unlock_irqrestore(&cfg->event_lock, flags);
|
||||||
|
spin_lock_irqsave(&cmd_ring->ring_lock,
|
||||||
|
flags);
|
||||||
|
ctxt_del_element(cmd_ring, NULL);
|
||||||
|
spin_unlock_irqrestore(&cmd_ring->ring_lock,
|
||||||
|
flags);
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
case MHI_PKT_TYPE_TX_EVENT:
|
case MHI_PKT_TYPE_TX_EVENT:
|
||||||
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
|
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
|
||||||
parse_xfer_event(mhi_dev_ctxt,
|
parse_xfer_event(mhi_dev_ctxt,
|
||||||
&event_to_process, ev_index);
|
&event_to_process,
|
||||||
|
ev_index);
|
||||||
__pm_relax(&mhi_dev_ctxt->w_lock);
|
__pm_relax(&mhi_dev_ctxt->w_lock);
|
||||||
break;
|
break;
|
||||||
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
|
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
|
||||||
{
|
{
|
||||||
enum STATE_TRANSITION new_state;
|
enum STATE_TRANSITION new_state;
|
||||||
|
unsigned long flags;
|
||||||
new_state = MHI_READ_STATE(&event_to_process);
|
new_state = MHI_READ_STATE(&event_to_process);
|
||||||
mhi_log(MHI_MSG_INFO,
|
mhi_log(MHI_MSG_INFO,
|
||||||
"MHI STE received ring 0x%x\n",
|
"MHI STE received ring 0x%x State:%s\n",
|
||||||
ev_index);
|
ev_index,
|
||||||
mhi_init_state_transition(mhi_dev_ctxt, new_state);
|
state_transition_str(new_state));
|
||||||
|
|
||||||
|
/* If transitioning to M1 schedule worker thread */
|
||||||
|
if (new_state == STATE_TRANSITION_M1) {
|
||||||
|
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
|
||||||
|
flags);
|
||||||
|
mhi_dev_ctxt->mhi_state =
|
||||||
|
mhi_get_m_state(mhi_dev_ctxt);
|
||||||
|
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
|
||||||
|
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
|
||||||
|
mhi_dev_ctxt->counters.m0_m1++;
|
||||||
|
schedule_work(&mhi_dev_ctxt->
|
||||||
|
process_m1_worker);
|
||||||
|
}
|
||||||
|
write_unlock_irqrestore(&mhi_dev_ctxt->
|
||||||
|
pm_xfer_lock,
|
||||||
|
flags);
|
||||||
|
} else {
|
||||||
|
mhi_init_state_transition(mhi_dev_ctxt,
|
||||||
|
new_state);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case MHI_PKT_TYPE_EE_EVENT:
|
case MHI_PKT_TYPE_EE_EVENT:
|
||||||
|
@ -178,10 +157,7 @@ static int mhi_process_event_ring(
|
||||||
mhi_log(MHI_MSG_INFO,
|
mhi_log(MHI_MSG_INFO,
|
||||||
"MHI System Error Detected. Triggering Reset\n");
|
"MHI System Error Detected. Triggering Reset\n");
|
||||||
BUG();
|
BUG();
|
||||||
if (!mhi_trigger_reset(mhi_dev_ctxt))
|
break;
|
||||||
mhi_log(MHI_MSG_ERROR,
|
|
||||||
"Failed to reset for SYSERR recovery\n");
|
|
||||||
break;
|
|
||||||
default:
|
default:
|
||||||
mhi_log(MHI_MSG_ERROR,
|
mhi_log(MHI_MSG_ERROR,
|
||||||
"Unsupported packet type code 0x%x\n",
|
"Unsupported packet type code 0x%x\n",
|
||||||
|
@ -198,6 +174,9 @@ static int mhi_process_event_ring(
|
||||||
ret_val = 0;
|
ret_val = 0;
|
||||||
--event_quota;
|
--event_quota;
|
||||||
}
|
}
|
||||||
|
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
|
mhi_deassert_device_wake(mhi_dev_ctxt);
|
||||||
|
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
|
||||||
return ret_val;
|
return ret_val;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -207,7 +186,7 @@ int parse_event_thread(void *ctxt)
|
||||||
u32 i = 0;
|
u32 i = 0;
|
||||||
int ret_val = 0;
|
int ret_val = 0;
|
||||||
int ret_val_process_event = 0;
|
int ret_val_process_event = 0;
|
||||||
atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
|
atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
|
||||||
|
|
||||||
/* Go through all event rings */
|
/* Go through all event rings */
|
||||||
for (;;) {
|
for (;;) {
|
||||||
|
@ -215,7 +194,7 @@ int parse_event_thread(void *ctxt)
|
||||||
wait_event_interruptible(
|
wait_event_interruptible(
|
||||||
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
|
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
|
||||||
((atomic_read(
|
((atomic_read(
|
||||||
&mhi_dev_ctxt->flags.events_pending) > 0) &&
|
&mhi_dev_ctxt->counters.events_pending) > 0) &&
|
||||||
!mhi_dev_ctxt->flags.stop_threads) ||
|
!mhi_dev_ctxt->flags.stop_threads) ||
|
||||||
mhi_dev_ctxt->flags.kill_threads ||
|
mhi_dev_ctxt->flags.kill_threads ||
|
||||||
(mhi_dev_ctxt->flags.stop_threads &&
|
(mhi_dev_ctxt->flags.stop_threads &&
|
||||||
|
@ -237,27 +216,45 @@ int parse_event_thread(void *ctxt)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
|
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
|
||||||
atomic_dec(&mhi_dev_ctxt->flags.events_pending);
|
atomic_dec(&mhi_dev_ctxt->counters.events_pending);
|
||||||
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
|
for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
|
||||||
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
|
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
|
||||||
mhi_log(MHI_MSG_INFO,
|
mhi_log(MHI_MSG_INFO,
|
||||||
"SYS_ERR detected, not processing events\n");
|
"SYS_ERR detected, not processing events\n");
|
||||||
atomic_set(&mhi_dev_ctxt->flags.events_pending,
|
atomic_set(&mhi_dev_ctxt->
|
||||||
|
counters.events_pending,
|
||||||
0);
|
0);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (GET_EV_PROPS(EV_MANAGED,
|
if (GET_EV_PROPS(EV_MANAGED,
|
||||||
mhi_dev_ctxt->ev_ring_props[i].flags)){
|
mhi_dev_ctxt->ev_ring_props[i].flags)) {
|
||||||
ret_val_process_event =
|
ret_val_process_event =
|
||||||
mhi_process_event_ring(mhi_dev_ctxt, i,
|
mhi_process_event_ring(mhi_dev_ctxt,
|
||||||
mhi_dev_ctxt->ev_ring_props[i].nr_desc);
|
i,
|
||||||
if (ret_val_process_event ==
|
mhi_dev_ctxt->
|
||||||
-EINPROGRESS)
|
ev_ring_props[i].nr_desc);
|
||||||
|
if (ret_val_process_event == -EINPROGRESS)
|
||||||
atomic_inc(ev_pen_ptr);
|
atomic_inc(ev_pen_ptr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return ret_val;
|
}
|
||||||
|
|
||||||
|
void mhi_ctrl_ev_task(unsigned long data)
|
||||||
|
{
|
||||||
|
struct mhi_device_ctxt *mhi_dev_ctxt =
|
||||||
|
(struct mhi_device_ctxt *)data;
|
||||||
|
const unsigned CTRL_EV_RING = 0;
|
||||||
|
struct mhi_event_ring_cfg *ring_props =
|
||||||
|
&mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
|
||||||
|
|
||||||
|
mhi_log(MHI_MSG_VERBOSE, "Enter\n");
|
||||||
|
/* Process control event ring */
|
||||||
|
mhi_process_event_ring(mhi_dev_ctxt,
|
||||||
|
CTRL_EV_RING,
|
||||||
|
ring_props->nr_desc);
|
||||||
|
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
|
||||||
|
mhi_log(MHI_MSG_VERBOSE, "Exit\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
|
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
|
||||||
|
@ -268,8 +265,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
|
||||||
client_handle->result.bytes_xferd = 0;
|
client_handle->result.bytes_xferd = 0;
|
||||||
client_handle->result.transaction_status = 0;
|
client_handle->result.transaction_status = 0;
|
||||||
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
|
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
|
||||||
client_handle->event_ring_index,
|
client_handle->event_ring_index,
|
||||||
1);
|
1);
|
||||||
if (ret_val)
|
if (ret_val)
|
||||||
mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
|
mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
|
||||||
return &(client_handle->result);
|
return &(client_handle->result);
|
||||||
|
@@ -296,3 +293,60 @@ void mhi_unmask_irq(struct mhi_client_handle *client_handle)
 	ev_ring->msi_enable_cntr++;
 	enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
 }
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+	struct device *mhi_device = dev_id;
+	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+	int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+
+	if (!mhi_dev_ctxt) {
+		mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+		return IRQ_HANDLED;
+	}
+	mhi_dev_ctxt->counters.msi_counter[
+			IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
+	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+	trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+	if (msi) {
+		atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+	} else {
+		disable_irq_nosync(irq_number);
+		tasklet_schedule(&mhi_dev_ctxt->ev_task);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+	struct device *mhi_device = dev_id;
+	u32 client_index;
+	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+	struct mhi_client_handle *client_handle;
+	struct mhi_client_info_t *client_info;
+	struct mhi_cb_info cb_info;
+	int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+	mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+	trace_mhi_msi(msi_num);
+	client_index = MHI_MAX_CHANNELS -
+			(mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+	client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+	client_info = &client_handle->client_info;
+	if (likely(client_handle)) {
+		client_handle->result.user_data =
+				client_handle->user_data;
+		if (likely(client_info->mhi_client_cb)) {
+			cb_info.result = &client_handle->result;
+			cb_info.cb_reason = MHI_CB_XFER;
+			cb_info.chan = client_handle->chan_info.chan_nr;
+			cb_info.result->transaction_status = 0;
+			client_info->mhi_client_cb(&cb_info);
+		}
+	}
+	return IRQ_HANDLED;
+}
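Note on the interrupt split introduced above: the control-ring MSI (vector 0) masks only its own line with disable_irq_nosync() and defers draining to a tasklet, which re-enables the IRQ once the ring is empty; data MSIs merely bump a counter and wake the event thread. A minimal sketch of the same top-half/bottom-half shape, with hypothetical demo_* names (the tasklet would be wired up with tasklet_init(&ctxt->ev_task, demo_ev_tasklet, (unsigned long)ctxt)):

#include <linux/interrupt.h>

struct demo_ctxt {
	int irq;
	struct tasklet_struct ev_task;
};

static void demo_drain_event_ring(struct demo_ctxt *ctxt)
{
	/* hypothetical: consume completed ring elements here */
}

static void demo_ev_tasklet(unsigned long data)
{
	struct demo_ctxt *ctxt = (struct demo_ctxt *)data;

	demo_drain_event_ring(ctxt);	/* drain with the vector still masked */
	enable_irq(ctxt->irq);		/* re-arm only after draining */
}

static irqreturn_t demo_msi_handler(int irq, void *dev_id)
{
	struct demo_ctxt *ctxt = dev_id;

	disable_irq_nosync(irq);	/* mask just this vector; never sleeps */
	tasklet_schedule(&ctxt->ev_task);
	return IRQ_HANDLED;
}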
@@ -247,9 +247,17 @@
 #define MHI_PRESERVE_DB_STATE__MASK (1)
 #define MHI_PRESERVE_DB_STATE__SHIFT (8)
 
+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
 #define GET_CHAN_PROPS(_FIELD, _VAL) \
 	(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
 
+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
 #define EV_TYPE
 #define MHI_EV_TYPE__MASK (3)
 #define MHI_EV_TYPE__SHIFT (3)
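For reference, the token-pasting accessor works the same way for the new burst-mode field as for the existing ones; a small worked example with a hypothetical properties value:

u32 chan_props = 0x0600;	/* hypothetical channel-properties word */
u32 brst = GET_CHAN_PROPS(BRSTMODE, chan_props);
/* expands to ((chan_props >> MHI_BRSTMODE__SHIFT) & MHI_BRSTMODE__MASK)
 * = ((0x0600 >> 9) & 3) = 3, i.e. burst mode enabled */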
(File diff suppressed because it is too large)
@@ -9,6 +9,19 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  */
+#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
 #include "mhi_sys.h"
 #include "mhi_hwio.h"
 #include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	u32 pcie_word_val = 0;
 	u32 expiry_counter;
+	unsigned long flags;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
 	mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+	read_lock_irqsave(pm_xfer_lock, flags);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_irqrestore(pm_xfer_lock, flags);
+		return -EIO;
+	}
 	pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
 				     MHISTATUS);
 	MHI_READ_FIELD(pcie_word_val,
 		       MHICTRL_RESET_MASK,
 		       MHICTRL_RESET_SHIFT);
+	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
 	if (pcie_word_val == 0xFFFFFFFF)
 		return -ENOTCONN;
 
 	while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
 		expiry_counter++;
 		mhi_log(MHI_MSG_ERROR,
 			"Device is not RESET, sleeping and retrying.\n");
 		msleep(MHI_READY_STATUS_TIMEOUT_MS);
+		read_lock_irqsave(pm_xfer_lock, flags);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_irqrestore(pm_xfer_lock, flags);
+			return -EIO;
+		}
 		pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
 					     MHICTRL);
 		MHI_READ_FIELD(pcie_word_val,
 			       MHICTRL_RESET_MASK,
 			       MHICTRL_RESET_SHIFT);
+		read_unlock_irqrestore(pm_xfer_lock, flags);
 	}
 
 	if (MHI_STATE_READY != pcie_word_val)
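The hunk above repeats one idiom around every MMIO access: take the pm_xfer_lock read side, check MHI_REG_ACCESS_VALID() against the new PM state machine, and bail with -EIO if register space may be gone. A minimal sketch of that guarded-read idiom, assuming the names from this patch (the helper itself is hypothetical):

static int demo_guarded_read(struct mhi_device_ctxt *mhi_dev_ctxt,
			     u32 reg, u32 *out)
{
	unsigned long flags;

	read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
		/* link may be down or in M1->M2 transition: no MMIO allowed */
		read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
		return -EIO;
	}
	*out = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, reg);
	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
	return 0;
}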
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	u32 pcie_word_val = 0;
 	u32 expiry_counter;
+	unsigned long flags;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
 	mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
 
+	read_lock_irqsave(pm_xfer_lock, flags);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_irqrestore(pm_xfer_lock, flags);
+		return -EIO;
+	}
 	/* Read MMIO and poll for READY bit to be set */
 	pcie_word_val = mhi_reg_read(
 			mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
 	MHI_READ_FIELD(pcie_word_val,
 		       MHISTATUS_READY_MASK,
 		       MHISTATUS_READY_SHIFT);
+	read_unlock_irqrestore(pm_xfer_lock, flags);
 
 	if (pcie_word_val == 0xFFFFFFFF)
 		return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
 		mhi_log(MHI_MSG_ERROR,
 			"Device is not ready, sleeping and retrying.\n");
 		msleep(MHI_READY_STATUS_TIMEOUT_MS);
+		read_lock_irqsave(pm_xfer_lock, flags);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_irqrestore(pm_xfer_lock, flags);
+			return -EIO;
+		}
 		pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
 					     MHISTATUS);
 		MHI_READ_FIELD(pcie_word_val,
 			       MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+		read_unlock_irqrestore(pm_xfer_lock, flags);
 	}
 
 	if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
 	mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
 				mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
 	if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
-		mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
-			mhi_dev_ctxt->dev_props->mhi_ver);
-		if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
-			ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
-		if (ret_val)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Bad MMIO version, 0x%x\n",
+			mhi_dev_ctxt->dev_props->mhi_ver);
 			return ret_val;
 	}
 
 	/* Enable the channels */
 	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
 		struct mhi_chan_ctxt *chan_ctxt =
 			&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
 		if (VALID_CHAN_NR(i))
-			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+			chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
 		else
-			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+			chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
 	}
 	mhi_log(MHI_MSG_INFO,
 		"Read back MMIO Ready bit successfully. Moving on..\n");
@@ -22,16 +22,12 @@
 #include "mhi_hwio.h"
 
 /* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
 static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
 
 /* Read only sysfs attributes */
 
 static struct attribute *mhi_attributes[] = {
-	&dev_attr_MHI_M3.attr,
 	&dev_attr_MHI_M0.attr,
-	&dev_attr_MHI_RESET.attr,
 	NULL,
 };
 
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
 int mhi_pci_suspend(struct device *dev)
 {
 	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	if (NULL == mhi_dev_ctxt)
-		return -EINVAL;
-	mhi_log(MHI_MSG_INFO, "Entered, MHI state %s\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+	mhi_log(MHI_MSG_INFO, "Entered\n");
 
-	r = mhi_initiate_m3(mhi_dev_ctxt);
+	/* if rpm status still active then force suspend */
+	if (!pm_runtime_status_suspended(dev)) {
+		r = mhi_runtime_suspend(dev);
+		if (r)
+			return r;
+	}
 
-	if (!r)
-		return r;
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
 
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
-	mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+	mhi_log(MHI_MSG_INFO, "Exit\n");
 	return r;
 }
 
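The reworked system-suspend path above funnels everything through the runtime-suspend routine, then pins the runtime-PM bookkeeping so the RPM core cannot race the system transition. A sketch of that shape under the same assumptions (demo_* names are hypothetical):

static int demo_pci_suspend(struct device *dev)
{
	int ret = 0;

	/* Reuse the runtime path if the device is still runtime-active. */
	if (!pm_runtime_status_suspended(dev))
		ret = demo_runtime_suspend(dev);	/* hypothetical */
	if (ret)
		return ret;

	pm_runtime_set_suspended(dev);	/* keep RPM status in sync */
	pm_runtime_disable(dev);	/* block RPM callbacks until resume */
	return 0;
}

On resume the mirror image applies: resume the link first, then pm_runtime_set_active() and pm_runtime_enable(), as the mhi_pci_resume() hunk below does.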
@@ -65,62 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
 	int r = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	r = mhi_initiate_m3(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 
-	pm_runtime_mark_last_busy(dev);
+	mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+		mhi_dev_ctxt->mhi_pm_state,
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+	/* Link is already disabled */
+	if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+	    mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+		mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return 0;
+	}
+
+	if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+		mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return -EBUSY;
+	}
+
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+			       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+			       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+			       msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to get M0||M1 event, timeout, current state:%s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+		r = -EIO;
+		goto rpm_suspend_exit;
+	}
+
+	mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_log(MHI_MSG_INFO,
+		"Waiting for M3 completion.\n");
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
+			       mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+			       msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to get M3 event, timeout, current state:%s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+		r = -EIO;
+		goto rpm_suspend_exit;
+	}
+
+	r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+	if (r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to Turn off link ret:%d\n", r);
+	}
+
+rpm_suspend_exit:
 	mhi_log(MHI_MSG_INFO, "Exited\n");
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
 	return r;
 }
 
+int mhi_runtime_idle(struct device *dev)
+{
+	mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+	/*
+	 * RPM framework during runtime resume always calls
+	 * rpm_idle to see if device ready to suspend.
+	 * If dev.power usage_count count is 0, rpm fw will call
+	 * rpm_idle cb to see if device is ready to suspend.
+	 * if cb return 0, or cb not defined the framework will
+	 * assume device driver is ready to suspend;
+	 * therefore, fw will schedule runtime suspend.
+	 * In MHI power management, MHI host shall go to
+	 * runtime suspend only after entering MHI State M2, even if
+	 * usage count is 0. Return -EBUSY to disable automatic suspend.
+	 */
+	return -EBUSY;
+}
+
 int mhi_runtime_resume(struct device *dev)
 {
 	int r = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	r = mhi_initiate_m0(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
-	pm_runtime_mark_last_busy(dev);
-	mhi_log(MHI_MSG_INFO, "Exited\n");
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+	/* turn on link */
+	r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+	if (r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to resume link\n");
+		goto rpm_resume_exit;
+	}
+
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+	/* Set and wait for M0 Event */
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+			       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+			       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+			       msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to get M0 event, timeout\n");
+		r = -EIO;
+		goto rpm_resume_exit;
+	}
+	r = 0; /* no errors */
+
+rpm_resume_exit:
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+	mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
 	return r;
 }
 
 int mhi_pci_resume(struct device *dev)
 {
 	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	r = mhi_initiate_m0(mhi_dev_ctxt);
-	if (r)
-		goto exit;
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-	switch (r) {
-	case 0:
+	r = mhi_runtime_resume(dev);
+	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
-			"Timeout: No M0 event after %d ms\n",
-			MHI_MAX_SUSPEND_TIMEOUT);
-		mhi_dev_ctxt->counters.m0_event_timeouts++;
-		r = -ETIME;
-		break;
-	case -ERESTARTSYS:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Going Down...\n");
-		break;
-	default:
-		mhi_log(MHI_MSG_INFO,
-			"Wait complete state: %s\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-		r = 0;
+			"Failed to resume link\n");
+	} else {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
 	}
-exit:
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
 	return r;
 }
 
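Design note on mhi_runtime_idle() above: returning -EBUSY vetoes the RPM core's automatic idle-to-suspend path, so runtime suspend is only ever scheduled explicitly (via pm_request_autosuspend from the M1 worker once the device has settled in M2). How the callbacks might be wired into dev_pm_ops, as a sketch with hypothetical demo_* names:

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_pci_suspend, demo_pci_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend,
			   demo_runtime_resume,
			   demo_runtime_idle)	/* always returns -EBUSY */
};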
@@ -134,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
 	return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
 }
 
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
-		      const char *buf, size_t count)
-{
-	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-		&mhi_devices.device_list[0].mhi_ctxt;
-	r = mhi_initiate_m3(mhi_dev_ctxt);
-	if (r) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to suspend %d\n", r);
-		return r;
-	}
-	r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to turn off link ret %d\n", r);
-
-	return count;
-}
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
-			     const char *buf, size_t count)
-{
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-		&mhi_devices.device_list[0].mhi_ctxt;
-	int r = 0;
-
-	mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
-	r = mhi_trigger_reset(mhi_dev_ctxt);
-	if (r != 0)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to trigger MHI RESET ret %d\n",
-			r);
-	else
-		mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
-	return count;
-}
 ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
 		      const char *buf, size_t count)
 {
 	struct mhi_device_ctxt *mhi_dev_ctxt =
 		&mhi_devices.device_list[0].mhi_ctxt;
-	if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to resume link\n");
-		return count;
-	}
-	mhi_initiate_m0(mhi_dev_ctxt);
-	mhi_log(MHI_MSG_CRITICAL,
-		"Current mhi_state = %s\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
+	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	return count;
 }
 
@@ -195,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
 	mhi_log(MHI_MSG_INFO, "Entered...\n");
 	pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
-	mutex_lock(&mhi_dev_ctxt->mhi_link_state);
 	if (0 == mhi_dev_ctxt->flags.link_up) {
 		mhi_log(MHI_MSG_CRITICAL,
 			"Link already marked as down, nothing to do\n");
 		goto exit;
 	}
-	/* Disable shadow to avoid restoring D3 hot struct device */
-	r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to stop shadow config space: %d\n", r);
 
-	r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+	r = pci_save_state(pcie_dev);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to set pcie power state to D3 hotret: %x\n", r);
-		goto exit;
+			"Failed to save pcie state ret: %d\n",
+			r);
 	}
+	mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+	pci_disable_device(pcie_dev);
+	r = pci_set_power_state(pcie_dev, PCI_D3hot);
+	if (r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to set pcie power state to D3 hot ret: %d\n",
+			r);
+	}
+
 	r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
-			mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-			mhi_dev_ctxt->dev_info->pcie_device,
+			pcie_dev->bus->number,
+			pcie_dev,
 			NULL,
 			0);
 	if (r)
 		mhi_log(MHI_MSG_CRITICAL,
 			"Failed to suspend pcie bus ret 0x%x\n", r);
 
+	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+	if (r)
+		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
 	mhi_dev_ctxt->flags.link_up = 0;
 exit:
-	mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Exited...\n");
 	return 0;
 }
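The link power-down above switches from the MSM shadow-config helper to the stock PCI core save/restore pairing: config space is saved and detached into a driver-owned buffer before D3hot, and loaded back after the link returns. A generic sketch of that pairing (on this target the msm_pcie_pm_control() calls bracket the bus-level suspend/resume; demo_* names are hypothetical):

#include <linux/pci.h>

static struct pci_saved_state *demo_saved;	/* driver-owned copy */

static void demo_link_off(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	demo_saved = pci_store_saved_state(pdev); /* detach a private copy */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	/* bus-level suspend of the root complex goes here */
}

static void demo_link_on(struct pci_dev *pdev)
{
	/* bus-level resume brings the link back first */
	if (pci_enable_device(pdev))
		return;
	pci_load_and_free_saved_state(pdev, &demo_saved);
	pci_restore_state(pdev);
	pci_set_master(pdev);	/* re-enable bus mastering for DMA */
}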
@@ -235,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
 	pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
 
-	mutex_lock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Entered...\n");
 	if (mhi_dev_ctxt->flags.link_up)
 		goto exit;
 
+	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
+	if (r)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Could not set bus frequency ret: %d\n",
+			r);
+
 	r = msm_pcie_pm_control(MSM_PCIE_RESUME,
-			mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-			mhi_dev_ctxt->dev_info->pcie_device,
-			NULL, 0);
+			pcie_dev->bus->number,
+			pcie_dev,
+			NULL,
+			0);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
 			"Failed to resume pcie bus ret %d\n", r);
 		goto exit;
 	}
 
-	r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
-				PCI_D0);
-	if (r) {
+	r = pci_enable_device(pcie_dev);
+	if (r)
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to load stored state %d\n", r);
-		goto exit;
-	}
-	r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
-	if (r) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to Recover config space ret: %d\n", r);
-		goto exit;
-	}
+			"Failed to enable device ret:%d\n",
+			r);
+	pci_load_and_free_saved_state(pcie_dev,
+				      &mhi_dev_ctxt->dev_props->pcie_state);
+	pci_restore_state(pcie_dev);
+	pci_set_master(pcie_dev);
+
 	mhi_dev_ctxt->flags.link_up = 1;
 exit:
-	mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Exited...\n");
 	return r;
 }
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/pm_runtime.h>
 #include <mhi_sys.h>
 #include <mhi.h>
 #include <mhi_bhi.h>
@@ -23,24 +24,11 @@
 static int mhi_ssr_notify_cb(struct notifier_block *nb,
 			     unsigned long action, void *data)
 {
-	int ret_val = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-		&mhi_devices.device_list[0].mhi_ctxt;
-	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
-
-	mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
-	if (NULL != mhi_dev_ctxt)
-		mhi_dev_ctxt->esoc_notif = action;
 	switch (action) {
 	case SUBSYS_BEFORE_POWERUP:
 		mhi_log(MHI_MSG_INFO,
 			"Received Subsystem event BEFORE_POWERUP\n");
-		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
-		ret_val = init_mhi_base_state(mhi_dev_ctxt);
-		if (0 != ret_val)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to transition to base state %d.\n",
-				ret_val);
 		break;
 	case SUBSYS_AFTER_POWERUP:
 		mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
 	}
 }
 
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 {
 	u32 pcie_word_val = 0;
 	int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 	mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
 	pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
 				     BHI_EXECENV);
+	mhi_dev_ctxt->dev_exec_env = pcie_word_val;
 	if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
 		mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
 	} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
 		mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
-		r = bhi_probe(mhi_pcie_dev);
-		if (r)
-			mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
 	} else {
 		mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
 			pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 
 void mhi_link_state_cb(struct msm_pcie_notify *notify)
 {
-	int ret_val = 0;
 	struct mhi_pcie_dev_info *mhi_pcie_dev;
 	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
-	int r = 0;
 
 	if (NULL == notify || NULL == notify->data) {
 		mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
 	case MSM_PCIE_EVENT_LINKUP:
 		mhi_log(MHI_MSG_INFO,
 			"Received MSM_PCIE_EVENT_LINKUP\n");
-		if (0 == mhi_pcie_dev->link_up_cntr) {
-			mhi_log(MHI_MSG_INFO,
-				"Initializing MHI for the first time\n");
-			r = mhi_ctxt_init(mhi_pcie_dev);
-			if (r) {
-				mhi_log(MHI_MSG_ERROR,
-					"MHI initialization failed, ret %d.\n",
-					r);
-				r = msm_pcie_register_event(
-					&mhi_pcie_dev->mhi_pci_link_event);
-				mhi_log(MHI_MSG_ERROR,
-					"Deregistered from PCIe notif r %d.\n",
-					r);
-				return;
-			}
-			mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
-			mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
-			pci_set_master(mhi_pcie_dev->pcie_device);
-			r = set_mhi_base_state(mhi_pcie_dev);
-			if (r)
-				return;
-			init_mhi_base_state(mhi_dev_ctxt);
-		} else {
-			mhi_log(MHI_MSG_INFO,
-				"Received Link Up Callback\n");
-		}
 		mhi_pcie_dev->link_up_cntr++;
 		break;
 	case MSM_PCIE_EVENT_WAKEUP:
@@ -231,17 +190,14 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
 			"Received MSM_PCIE_EVENT_WAKE\n");
 		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
 		__pm_relax(&mhi_dev_ctxt->w_lock);
-		if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
-			mhi_log(MHI_MSG_INFO,
-				"There is a pending resume, doing nothing.\n");
-			return;
-		}
-		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
-						    STATE_TRANSITION_WAKE);
-		if (0 != ret_val) {
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to init state transition, to %d\n",
-				STATE_TRANSITION_WAKE);
+
+		if (mhi_dev_ctxt->flags.mhi_initialized) {
+			pm_runtime_get(&mhi_dev_ctxt->
+				       dev_info->pcie_device->dev);
+			pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+						  dev_info->pcie_device->dev);
+			pm_runtime_put_noidle(&mhi_dev_ctxt->
+					      dev_info->pcie_device->dev);
 		}
 		break;
 	default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int r = 0;
 
-	mhi_assert_device_wake(mhi_dev_ctxt);
-	mhi_dev_ctxt->flags.link_up = 1;
-	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
-	if (r)
-		mhi_log(MHI_MSG_INFO,
-			"Failed to scale bus request to active set.\n");
 	r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
-static const char *state_transition_str(enum STATE_TRANSITION state)
+const char *state_transition_str(enum STATE_TRANSITION state)
 {
 	static const char * const mhi_states_transition_str[] = {
 		"RESET",
@@ -40,7 +40,17 @@ static const char *state_transition_str(enum STATE_TRANSITION state)
 		mhi_states_transition_str[state] : "Invalid";
 }
 
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+				       MHISTATUS,
+				       MHISTATUS_MHISTATE_MASK,
+				       MHISTATUS_MHISTATE_SHIFT);
+
+	return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
 		     enum MHI_STATE new_state)
 {
 	if (MHI_STATE_RESET == new_state) {
@@ -64,20 +74,18 @@ static void conditional_chan_db_write(
 {
 	u64 db_value;
 	unsigned long flags;
+	struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 
-	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
-	if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
-		db_value =
-			mhi_v2p_addr(mhi_dev_ctxt,
-				     MHI_RING_TYPE_XFER_RING, chan,
-		(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-		mhi_process_db(mhi_dev_ctxt,
-			       mhi_dev_ctxt->mmio_info.chan_db_addr,
-			       chan, db_value);
-	}
-	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+	spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt,
+				MHI_RING_TYPE_XFER_RING,
+				chan,
+				(uintptr_t)mhi_ring->wp);
+	mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+				     mhi_dev_ctxt->mmio_info.chan_db_addr,
+				     chan,
+				     db_value);
+	spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 }
 
 static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
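The hunk above replaces the global db-order bookkeeping with a per-ring db_mode.process_db function pointer, so callers ring a doorbell without caring whether the channel runs in burst mode. A sketch of how such a hook could be selected from the channel's brstmode bits; the two callee names are assumptions, only the enum values come from this patch:

struct demo_db_mode {
	void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
			   void __iomem *db_addr, u32 chan, u64 val);
};

static void demo_pick_db_mode(struct demo_db_mode *db, u32 brstmode)
{
	/* chosen once at ring init, then called blindly on every ring */
	if (brstmode == MHI_BRSTMODE_ENABLE)
		db->process_db = demo_process_db_brstmode;	  /* assumed */
	else
		db->process_db = demo_process_db_brstmode_disable; /* assumed */
}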
@@ -103,29 +111,25 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
 
 static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-	struct mutex *cmd_mutex = NULL;
 	u64 db_value;
 	u64 rp = 0;
 	struct mhi_ring *local_ctxt = NULL;
 
 	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
-	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
-	mhi_dev_ctxt->cmd_ring_order = 0;
-	mutex_lock(cmd_mutex);
 	local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
 	rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
 			  PRIMARY_CMD_RING,
 			  (uintptr_t)local_ctxt->rp);
-	db_value =
-		mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
-			     PRIMARY_CMD_RING,
-		(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
-	if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
-		mhi_process_db(mhi_dev_ctxt,
-			       mhi_dev_ctxt->mmio_info.cmd_db_addr,
-			       0, db_value);
-	mhi_dev_ctxt->cmd_ring_order = 0;
-	mutex_unlock(cmd_mutex);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt,
+				MHI_RING_TYPE_CMD_RING,
+				PRIMARY_CMD_RING,
+				(uintptr_t)local_ctxt->wp);
+	if (rp != db_value)
+		local_ctxt->db_mode.process_db(mhi_dev_ctxt,
+				mhi_dev_ctxt->mmio_info.cmd_db_addr,
+				0,
+				db_value);
 }
 
 static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -133,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 	u32 i;
 	u64 db_value = 0;
 	struct mhi_event_ctxt *event_ctxt = NULL;
+	struct mhi_ring *mhi_ring;
 	spinlock_t *lock = NULL;
 	unsigned long flags;
 
 	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
-		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
-		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+		mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+		lock = &mhi_ring->ring_lock;
 		spin_lock_irqsave(lock, flags);
 		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
-		db_value =
-			mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
-				     i,
-			(uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
-		if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
-			mhi_process_db(mhi_dev_ctxt,
-				       mhi_dev_ctxt->mmio_info.event_db_addr,
-				       i, db_value);
-		}
-		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+		db_value = mhi_v2p_addr(mhi_dev_ctxt,
+					MHI_RING_TYPE_EVENT_RING,
+					i,
+					(uintptr_t)mhi_ring->wp);
+		mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+				mhi_dev_ctxt->mmio_info.event_db_addr,
+				i,
+				db_value);
 		spin_unlock_irqrestore(lock, flags);
 	}
 }
@@ -159,169 +162,121 @@ static int process_m0_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	unsigned long flags;
-	int r = 0;
 
-	mhi_log(MHI_MSG_INFO, "Entered\n");
+	mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+	switch (mhi_dev_ctxt->mhi_state) {
+	case MHI_STATE_M2:
 		mhi_dev_ctxt->counters.m2_m0++;
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
-		mhi_dev_ctxt->counters.m3_m0++;
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
-		mhi_log(MHI_MSG_INFO,
-			"Transitioning from READY.\n");
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
-		mhi_log(MHI_MSG_INFO,
-			"Transitioning from M1.\n");
-	} else {
-		mhi_log(MHI_MSG_INFO,
-			"MHI State %s link state %d. Quitting\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			mhi_dev_ctxt->flags.link_up);
+		break;
+	case MHI_STATE_M3:
+		mhi_dev_ctxt->counters.m3_m0++;
+		break;
+	default:
+		break;
 	}
 
-	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-	mhi_assert_device_wake(mhi_dev_ctxt);
-	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_assert_device_wake(mhi_dev_ctxt, true);
 
 	if (mhi_dev_ctxt->flags.mhi_initialized) {
 		ring_all_ev_dbs(mhi_dev_ctxt);
 		ring_all_chan_dbs(mhi_dev_ctxt, true);
 		ring_all_cmd_dbs(mhi_dev_ctxt);
 	}
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Could not set bus frequency ret: %d\n",
-			r);
-	mhi_dev_ctxt->flags.pending_M0 = 0;
-	if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
-		atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
-		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
-	}
-	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	if (!mhi_dev_ctxt->flags.pending_M3 &&
-	    mhi_dev_ctxt->flags.link_up &&
-	    mhi_dev_ctxt->flags.mhi_initialized)
-		mhi_deassert_device_wake(mhi_dev_ctxt);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
 }
 
-static int process_m1_transition(
-	struct mhi_device_ctxt *mhi_dev_ctxt,
-	enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
 {
-	unsigned long flags = 0;
-	int r = 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+
+	mhi_dev_ctxt = container_of(work,
+				    struct mhi_device_ctxt,
+				    process_m1_worker);
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
 	mhi_log(MHI_MSG_INFO,
 		"Processing M1 state transition from state %s\n",
 		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	if (!mhi_dev_ctxt->flags.pending_M3) {
-		mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
-		atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
-		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
-		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
-		mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
-		mhi_dev_ctxt->counters.m1_m2++;
+	/* We either Entered M3 or we did M3->M0 Exit */
+	if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+		write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return;
 	}
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-	if (r)
-		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
 
-	mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+	mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+	mhi_dev_ctxt->counters.m1_m2++;
+	mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
 	msleep(MHI_M2_DEBOUNCE_TMR_MS);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
-		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
-	    mhi_dev_ctxt->flags.pending_M3) {
-		mhi_assert_device_wake(mhi_dev_ctxt);
-	} else {
-		pm_runtime_mark_last_busy(
-			&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		r = pm_request_autosuspend(
-			&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		if (r && r != -EAGAIN) {
-			mhi_log(MHI_MSG_ERROR,
-				"Failed to remove counter ret %d\n", r);
-			BUG_ON(mhi_dev_ctxt->dev_info->
-			       pcie_device->dev.power.runtime_error);
-		}
+	/* During DEBOUNCE Time We could be receiving M0 Event */
+	if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+		mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+		mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
 	}
-	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
-	mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-	return 0;
+	if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+		mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+			atomic_read(&mhi_dev_ctxt->counters.device_wake));
+		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mhi_assert_device_wake(mhi_dev_ctxt, true);
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	} else {
+		mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+					  dev_info->pcie_device->dev);
+		pm_request_autosuspend(&mhi_dev_ctxt->
+				       dev_info->pcie_device->dev);
+	}
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
 }
 
 static int process_m3_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	unsigned long flags;
 
 	mhi_log(MHI_MSG_INFO,
-		"Processing M3 state transition\n");
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
-	mhi_dev_ctxt->flags.pending_M3 = 0;
-	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->counters.m0_m3++;
-	return 0;
-}
-
-static int mhi_process_link_down(
-	struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-	unsigned long flags;
-	int r;
-
-	mhi_log(MHI_MSG_INFO, "Entered.\n");
-	if (NULL == mhi_dev_ctxt)
-		return -EINVAL;
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->flags.mhi_initialized = 0;
-	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	mhi_deassert_device_wake(mhi_dev_ctxt);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
-	mhi_dev_ctxt->flags.stop_threads = 1;
-
-	while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
-		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
-		mhi_log(MHI_MSG_INFO,
-			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
-			mhi_dev_ctxt->flags.st_thread_stopped,
-			mhi_dev_ctxt->flags.ev_thread_stopped);
-		msleep(20);
+	switch (mhi_dev_ctxt->mhi_state) {
+	case MHI_STATE_M1:
+		mhi_dev_ctxt->counters.m1_m3++;
+		break;
+	case MHI_STATE_M0:
+		mhi_dev_ctxt->counters.m0_m3++;
+		break;
+	default:
+		break;
 	}
 
-	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-	if (r)
-		mhi_log(MHI_MSG_INFO,
-			"Failed to scale bus request to sleep set.\n");
-	mhi_turn_off_pcie_link(mhi_dev_ctxt);
-	mhi_dev_ctxt->dev_info->link_down_cntr++;
-	atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
 
 	return 0;
 }
 
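The M1 worker above illustrates the debounce pattern at the heart of the redesign: mark an intermediate PM state, sleep the debounce period with the lock dropped, then re-check whether an M0 event raced in before committing to M2. Stripped to its skeleton, under the same assumptions (DEMO_* states and the ctxt layout are hypothetical):

static void demo_m1_worker(struct work_struct *work)
{
	struct demo_ctxt *ctxt = container_of(work, struct demo_ctxt,
					      m1_work);

	mutex_lock(&ctxt->pm_lock);
	write_lock_irq(&ctxt->pm_xfer_lock);
	ctxt->pm_state = DEMO_M1_M2_TRANSITION;	/* hypothetical enum */
	write_unlock_irq(&ctxt->pm_xfer_lock);

	msleep(DEMO_DEBOUNCE_MS);	/* lock dropped: an M0 event may land here */

	write_lock_irq(&ctxt->pm_xfer_lock);
	if (ctxt->pm_state == DEMO_M1_M2_TRANSITION)	/* nobody raced us */
		ctxt->pm_state = DEMO_M2;
	write_unlock_irq(&ctxt->pm_xfer_lock);
	mutex_unlock(&ctxt->pm_lock);
}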
@@ -329,51 +284,20 @@ static int process_link_down_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	if (0 !=
-	    mhi_process_link_down(mhi_dev_ctxt)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to process link down\n");
-	}
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
-	return 0;
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }
 
 static int process_wake_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	__pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
-	if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Pending SSR, Ignoring.\n");
-		goto exit;
-	}
-	if (mhi_dev_ctxt->flags.mhi_initialized) {
-		r = pm_request_resume(
-			&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		mhi_log(MHI_MSG_VERBOSE,
-			"MHI is initialized, transitioning to M0, ret %d\n", r);
-	}
-
-	if (!mhi_dev_ctxt->flags.mhi_initialized) {
-		mhi_log(MHI_MSG_INFO,
-			"MHI is not initialized transitioning to base.\n");
-		r = init_mhi_base_state(mhi_dev_ctxt);
-		if (0 != r)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to transition to base state %d.\n",
-				r);
-	}
-
-exit:
-	__pm_relax(&mhi_dev_ctxt->w_lock);
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
-	return r;
-
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }
 
@@ -381,9 +305,10 @@ static int process_bhi_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	mhi_turn_on_pcie_link(mhi_dev_ctxt);
 	mhi_log(MHI_MSG_INFO, "Entered\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
@@ -396,36 +321,42 @@ static int process_ready_transition(
 	int r = 0;
 
 	mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
-	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
 
 	r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-	if (r)
+	if (r) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failed to reset thread queues\n");
+		return r;
+	}
 
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
 	r = mhi_init_mmio(mhi_dev_ctxt);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	/* Initialize MMIO */
 	if (r) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failure during MMIO initialization\n");
 		return r;
 	}
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
 					    cur_work_item);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	if (r) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failure during event ring init\n");
 		return r;
 	}
 
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->flags.stop_threads = 0;
-	mhi_assert_device_wake(mhi_dev_ctxt);
 	mhi_reg_write_field(mhi_dev_ctxt,
 			    mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
 			    MHICTRL_MHISTATE_MASK,
 			    MHICTRL_MHISTATE_SHIFT,
 			    MHI_STATE_M0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	return r;
 }
 
@@ -448,37 +379,22 @@ static int process_reset_transition(
 				enum STATE_TRANSITION cur_work_item)
 {
 	int r = 0, i = 0;
-	unsigned long flags = 0;

 	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

 	mhi_dev_ctxt->counters.mhi_reset_cntr++;
-	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
 	r = mhi_test_for_device_reset(mhi_dev_ctxt);
 	if (r)
 		mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
 	r = mhi_test_for_device_ready(mhi_dev_ctxt);
-	switch (r) {
-	case 0:
-		break;
-	case -ENOTCONN:
-		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
-		break;
-	case -ETIMEDOUT:
-		r = mhi_init_state_transition(mhi_dev_ctxt,
-					STATE_TRANSITION_RESET);
-		if (0 != r)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to initiate %s state trans\n",
-				state_transition_str(STATE_TRANSITION_RESET));
-		break;
-	default:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Unexpected ret code detected for\n");
-		break;
+	if (r) {
+		mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
+			r);
+		return r;
 	}

 	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
 		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
 			mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;

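Reviewer note: the old switch distinguished -ENOTCONN and -ETIMEDOUT and re-queued another RESET work item on timeout; the new code treats any nonzero result from the ready wait as fatal and returns it to the caller. For reference, a device-ready wait of this kind is typically a bounded register poll; a hedged sketch using the generic readl_poll_timeout() helper (the bit position and timings are placeholders, not taken from this driver):

    #include <linux/iopoll.h>
    #include <linux/io.h>

    /* Poll a status register until the device reports READY or we give
     * up; returns 0 on success or -ETIMEDOUT, matching the "timed out
     * waiting for ready" handling above. */
    static int wait_for_ready(void __iomem *status_reg)
    {
            u32 val;

            /* sleep ~1 ms between reads, give up after 100 ms */
            return readl_poll_timeout(status_reg, val, val & 0x1,
                                      1000, 100000);
    }
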
@@ -511,45 +427,10 @@ static int process_syserr_transition(
 				struct mhi_device_ctxt *mhi_dev_ctxt,
 				enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-
-	mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
-	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	r = mhi_init_state_transition(mhi_dev_ctxt,
-				STATE_TRANSITION_RESET);
-	if (r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to init state transition to RESET ret %d\n", r);
-		mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
-	}
-	return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
-	int r = 0;
-	int chan = client_handle->chan_info.chan_nr;
-
-	init_completion(&client_handle->chan_open_complete);
-	r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
-			MHI_COMMAND_START_CHAN,
-			chan);
-	if (r != 0) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to send start command for chan %d ret %d\n",
-			chan, r);
-		return r;
-	}
-	r = wait_for_completion_timeout(
-			&client_handle->chan_open_complete,
-			msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
-	if (!r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Timed out waiting for chan %d start completion\n",
-			chan);
-		r = -ETIME;
-	}
-	return 0;
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }

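Reviewer note: deleting start_chan_sync() also removes a latent bug visible in the removed lines: the function set r = -ETIME when the completion timed out, then fell through to return 0, so callers never observed the failure. The corrected shape of that completion wait, as a standalone sketch (hypothetical helper, not part of this patch):

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    /* wait_for_completion_timeout() returns the remaining jiffies, or 0
     * on timeout; the timeout must be propagated, not discarded. */
    static int wait_cmd_done(struct completion *done, unsigned int timeout_ms)
    {
            unsigned long left;

            left = wait_for_completion_timeout(done,
                                               msecs_to_jiffies(timeout_ms));
            return left ? 0 : -ETIME;
    }
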
 static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -573,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
 				chan_info.flags))
 			mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
 	}
-	if (exec_env == MHI_EXEC_ENV_AMSS)
-		mhi_deassert_device_wake(mhi_dev_ctxt);
 	mhi_log(MHI_MSG_INFO, "Done.\n");
 }

@@ -582,36 +462,25 @@ static int process_sbl_transition(
 				struct mhi_device_ctxt *mhi_dev_ctxt,
 				enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-	pm_runtime_set_autosuspend_delay(
-		&mhi_dev_ctxt->dev_info->pcie_device->dev,
-		MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
-	pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	if (r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to activate runtime pm ret %d\n", r);
-	}
-	pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+	mhi_log(MHI_MSG_INFO, "Enabled\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
-	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	return 0;

 }

 static int process_amss_transition(
 				struct mhi_device_ctxt *mhi_dev_ctxt,
 				enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0, i = 0;
-	struct mhi_client_handle *client_handle = NULL;
+	int r = 0;

 	mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-	mhi_assert_device_wake(mhi_dev_ctxt);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	if (!mhi_dev_ctxt->flags.mhi_initialized) {
 		r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
 						    cur_work_item);

@@ -619,56 +488,42 @@ static int process_amss_transition(
 		if (r) {
 			mhi_log(MHI_MSG_CRITICAL,
 				"Failed to set local chan state ret %d\n", r);
+			mhi_deassert_device_wake(mhi_dev_ctxt);
 			return r;
 		}
+		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 		ring_all_chan_dbs(mhi_dev_ctxt, true);
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 		mhi_log(MHI_MSG_INFO,
 			"Notifying clients that MHI is enabled\n");
 		enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
 	} else {
 		mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
-		for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
-			client_handle = mhi_dev_ctxt->client_handle_list[i];
-			if (client_handle && client_handle->chan_status)
-				r = start_chan_sync(client_handle);
-				WARN(r, "Failed to start chan %d ret %d\n",
-					i, r);
-				return r;
-		}
-		ring_all_chan_dbs(mhi_dev_ctxt, true);
 	}

+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	ring_all_ev_dbs(mhi_dev_ctxt);
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-	if (!mhi_dev_ctxt->flags.pending_M3 &&
-	     mhi_dev_ctxt->flags.link_up)
-		mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	/*
+	 * runtime_allow will decrement usage_count, counts were
+	 * incremented by pci fw pci_pm_init() or by
+	 * mhi shutdown/ssr apis.
+	 */
+	mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
+
+	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+
+	/* During probe we incremented, releasing that count */
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);

 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
 }

-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-	int r = 0;
-	unsigned long flags = 0;
-
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR;
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
-	mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n");
-	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
-	mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n");
-	r = mhi_init_state_transition(mhi_dev_ctxt,
-				STATE_TRANSITION_RESET);
-	if (0 != r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to initiate %s state trans ret %d\n",
-			state_transition_str(STATE_TRANSITION_RESET), r);
-	mhi_log(MHI_MSG_INFO, "Exiting\n");
-	return r;
-}
-

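Reviewer note: with the AMSS hunk above, idle management moves to the runtime PM core: the usage counts taken by the PCI core at pci_pm_init() time and by the driver during probe or shutdown/SSR are dropped here, and autosuspend takes over where the old code manually deasserted device wake. A standalone sketch of how such counts pair up (the helper names and the delay value are illustrative assumptions, not this driver's API):

    #include <linux/pm_runtime.h>

    /* Probe side: configure autosuspend but hold the device active
     * until the boot chain (RESET -> READY -> SBL -> AMSS) finishes. */
    static void mhi_rpm_probe_setup(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 1000); /* example, ms */
            pm_runtime_use_autosuspend(dev);
            pm_runtime_get_noresume(dev);   /* count released at AMSS */
    }

    /* AMSS side: release the counts so the core may suspend the link. */
    static void mhi_rpm_amss_release(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev); /* re-arm autosuspend timer */
            pm_runtime_allow(dev);          /* count from pci_pm_init() */
            pm_runtime_put_noidle(dev);     /* count from probe */
    }

Every get must be matched by exactly one put or allow; an unbalanced count either pins the link active forever or suspends it underneath active traffic.
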
 static int process_stt_work_item(
 				struct mhi_device_ctxt *mhi_dev_ctxt,
 				enum STATE_TRANSITION cur_work_item)
@@ -697,9 +552,6 @@ static int process_stt_work_item(
 	case STATE_TRANSITION_M0:
 		r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
 		break;
-	case STATE_TRANSITION_M1:
-		r = process_m1_transition(mhi_dev_ctxt, cur_work_item);
-		break;
 	case STATE_TRANSITION_M3:
 		r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
 		break;

@@ -799,227 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
 	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
 	return r;
 }
-
-int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-	int r = 0;
-	unsigned long flags;
-
-	mhi_log(MHI_MSG_INFO,
-		"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-		mhi_dev_ctxt->flags.pending_M0,
-		mhi_dev_ctxt->flags.pending_M3);
-	mutex_lock(&mhi_dev_ctxt->pm_lock);
-	mhi_log(MHI_MSG_INFO,
-		"Waiting for M0 M1 or M3. Currently %s...\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-	switch (r) {
-	case 0:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Timeout: State %s after %d ms\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			MHI_MAX_SUSPEND_TIMEOUT);
-		mhi_dev_ctxt->counters.m0_event_timeouts++;
-		r = -ETIME;
-		goto exit;
-	case -ERESTARTSYS:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Going Down...\n");
-		goto exit;
-	default:
-		mhi_log(MHI_MSG_INFO,
-			"Wait complete state: %s\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-		r = 0;
-		break;
-	}
-	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-	    mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
-		mhi_assert_device_wake(mhi_dev_ctxt);
-		mhi_log(MHI_MSG_INFO,
-			"MHI state %s, done\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-		goto exit;
-	} else {
-		if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to resume link\n");
-			r = -EIO;
-			goto exit;
-		}
-
-		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-		mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
-		if (mhi_dev_ctxt->flags.pending_M3) {
-			mhi_log(MHI_MSG_INFO,
-				"Pending M3 detected, aborting M0 procedure\n");
-			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
-						flags);
-			r = -EPERM;
-			goto exit;
-		}
-		if (mhi_dev_ctxt->flags.link_up) {
-			mhi_dev_ctxt->flags.pending_M0 = 1;
-			mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
-		}
-		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-		r = wait_event_interruptible_timeout(
-				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-				mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
-				mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
-				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-		WARN_ON(!r || -ERESTARTSYS == r);
-		if (!r || -ERESTARTSYS == r)
-			mhi_log(MHI_MSG_ERROR,
-				"Failed to get M0 event ret %d\n", r);
-		r = 0;
-	}
-exit:
-	mutex_unlock(&mhi_dev_ctxt->pm_lock);
-	mhi_log(MHI_MSG_INFO, "Exited...\n");
-	return r;
-}
-
-int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-
-	unsigned long flags;
-	int r = 0, abort_m3 = 0;
-
-	mhi_log(MHI_MSG_INFO,
-		"Entered MHI state %s, Pending M0 %d Pending M3 %d\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-		mhi_dev_ctxt->flags.pending_M0,
-		mhi_dev_ctxt->flags.pending_M3);
-	mutex_lock(&mhi_dev_ctxt->pm_lock);
-	switch (mhi_dev_ctxt->mhi_state) {
-	case MHI_STATE_RESET:
-		mhi_log(MHI_MSG_INFO,
-			"MHI in RESET turning link off and quitting\n");
-		mhi_turn_off_pcie_link(mhi_dev_ctxt);
-		r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-		if (r)
-			mhi_log(MHI_MSG_INFO,
-				"Failed to set bus freq ret %d\n", r);
-		goto exit;
-	case MHI_STATE_M0:
-	case MHI_STATE_M1:
-	case MHI_STATE_M2:
-		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-		mhi_log(MHI_MSG_INFO,
-			"Triggering wake out of M2\n");
-		mhi_dev_ctxt->flags.pending_M3 = 1;
-		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
-			mhi_log(MHI_MSG_INFO,
-				"M2 transition not set\n");
-			mhi_assert_device_wake(mhi_dev_ctxt);
-		}
-
-		if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
-			write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
-						flags);
-			r = wait_event_interruptible_timeout(
-				*mhi_dev_ctxt->mhi_ev_wq.m0_event,
-				mhi_dev_ctxt->mhi_state == MHI_STATE_M0,
-				msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
-			write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-			if (0 == r || -ERESTARTSYS == r) {
-				mhi_log(MHI_MSG_CRITICAL,
-					"MDM failed to come out of M2.\n");
-				mhi_dev_ctxt->counters.m2_event_timeouts++;
-				r = -EAGAIN;
-				goto unlock;
-			}
-		}
-		break;
-	case MHI_STATE_M3:
-		mhi_log(MHI_MSG_INFO,
-			"MHI state %s, link state %d.\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			mhi_dev_ctxt->flags.link_up);
-		if (mhi_dev_ctxt->flags.link_up)
-			r = -EAGAIN;
-		else
-			r = 0;
-		goto exit;
-	default:
-		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-		mhi_log(MHI_MSG_INFO,
-			"MHI state %s, link state %d.\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			mhi_dev_ctxt->flags.link_up);
-		break;
-	}
-
-	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
-		mhi_log(MHI_MSG_INFO,
-			"There are still %d acks pending from device\n",
-			atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
-		__pm_relax(&mhi_dev_ctxt->w_lock);
-		abort_m3 = 1;
-		r = -EAGAIN;
-		goto unlock;
-	}
-
-	if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
-		abort_m3 = 1;
-		r = -EAGAIN;
-		goto unlock;
-	}
-
-	if (mhi_dev_ctxt->flags.pending_M0) {
-		r = -EAGAIN;
-		goto unlock;
-	}
-	mhi_dev_ctxt->flags.pending_M3 = 1;
-
-	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_log(MHI_MSG_INFO,
-		"Waiting for M3 completion.\n");
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
-			mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
-			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-	switch (r) {
-	case 0:
-		mhi_log(MHI_MSG_CRITICAL,
-			"MDM failed to suspend after %d ms\n",
-			MHI_MAX_SUSPEND_TIMEOUT);
-		mhi_dev_ctxt->counters.m3_event_timeouts++;
-		mhi_dev_ctxt->flags.pending_M3 = 0;
-		goto exit;
-	default:
-		mhi_log(MHI_MSG_INFO,
-			"M3 completion received\n");
-		break;
-	}
-	mhi_turn_off_pcie_link(mhi_dev_ctxt);
-	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-	if (r)
-		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-	goto exit;
-unlock:
-	mhi_dev_ctxt->flags.pending_M3 = 0;
-	if (abort_m3) {
-		atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-		ring_all_chan_dbs(mhi_dev_ctxt, false);
-		ring_all_cmd_dbs(mhi_dev_ctxt);
-		atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-		mhi_deassert_device_wake(mhi_dev_ctxt);
-	} else {
-		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	}
-exit:
-	mhi_dev_ctxt->flags.pending_M3 = 0;
-	mutex_unlock(&mhi_dev_ctxt->pm_lock);
-	return r;
-}

@@ -21,9 +21,9 @@
 enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR;

 #ifdef CONFIG_MSM_MHI_DEBUG
 enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
 #else
 enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
 #endif

 unsigned int mhi_log_override;

@@ -58,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
 	int valid_chan = 0;
 	struct mhi_chan_ctxt *cc_list;
 	struct mhi_client_handle *client_handle;
+	int pkts_queued;

 	if (NULL == mhi_dev_ctxt)
 		return -EIO;

@@ -86,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
 			mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
 			&v_wp_index);

+	pkts_queued = client_handle->chan_info.max_desc -
+		get_nr_avail_ring_elements(&mhi_dev_ctxt->
+					   mhi_local_chan_ctxt[*offp]) - 1;
 	amnt_copied =
 	scnprintf(mhi_dev_ctxt->chan_info,
 		MHI_LOG_SIZE,
 		"%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
 		"chan:",
 		(unsigned int)*offp,
 		"pkts from dev:",
 		mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
 		"state:",
-		chan_ctxt->mhi_chan_state,
+		chan_ctxt->chstate,
 		"p_base:",
 		chan_ctxt->mhi_trb_ring_base_addr,
 		"v_base:",
 		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
 		"v_wp:",
 		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
 		"index:",
 		v_wp_index,
 		"v_rp:",
 		mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
 		"index:",
 		v_rp_index,
 		"pkts_queued",
-		get_nr_avail_ring_elements(
-			&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
+		pkts_queued,
 		"/",
 		client_handle->chan_info.max_desc,
 		"bb_used:",
 		mhi_dev_ctxt->counters.bb_used[*offp]);

 	*offp += 1;

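Reviewer note: pkts_queued converts the free-element count into occupancy. One ring slot is always left empty so that rp == wp can unambiguously mean "empty", hence occupancy = max_desc - available - 1, which is what the new lines compute. A self-contained illustration with a hypothetical ring type (not the driver's structures):

    #include <assert.h>

    /* One slot is sacrificed so rp == wp always means "empty". */
    struct ring {
            unsigned int capacity;  /* total slots (max_desc) */
            unsigned int rp, wp;    /* read/write indices, mod capacity */
    };

    /* Free slots, keeping one reserved as the full/empty sentinel. */
    static unsigned int nr_avail(const struct ring *r)
    {
            return (r->rp + r->capacity - r->wp - 1) % r->capacity;
    }

    static unsigned int pkts_queued(const struct ring *r)
    {
            return r->capacity - nr_avail(r) - 1;
    }

    int main(void)
    {
            struct ring r = { .capacity = 8, .rp = 2, .wp = 5 };

            /* wp leads rp by 3, so 3 descriptors are in flight */
            assert(nr_avail(&r) == 4);
            assert(pkts_queued(&r) == 3);
            return 0;
    }
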
@@ -236,35 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
 	msleep(100);
 	amnt_copied =
 	scnprintf(mhi_dev_ctxt->chan_info,
 		MHI_LOG_SIZE,
-		"%s %s %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d\n",
-		"Our State:",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-		"M0->M1:",
-		mhi_dev_ctxt->counters.m0_m1,
-		"M0<-M1:",
-		mhi_dev_ctxt->counters.m1_m0,
-		"M1->M2:",
-		mhi_dev_ctxt->counters.m1_m2,
-		"M0<-M2:",
-		mhi_dev_ctxt->counters.m2_m0,
-		"M0->M3:",
-		mhi_dev_ctxt->counters.m0_m3,
-		"M0<-M3:",
-		mhi_dev_ctxt->counters.m3_m0,
-		"M3_ev_TO:",
-		mhi_dev_ctxt->counters.m3_event_timeouts,
-		"M0_ev_TO:",
-		mhi_dev_ctxt->counters.m0_event_timeouts,
-		"outstanding_acks:",
-		atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
-		"LPM:",
-		mhi_dev_ctxt->enable_lpm);
+		"%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n",
+		"MHI State:",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+		"PM State:",
+		mhi_dev_ctxt->mhi_pm_state,
+		"M0->M1:",
+		mhi_dev_ctxt->counters.m0_m1,
+		"M1->M2:",
+		mhi_dev_ctxt->counters.m1_m2,
+		"M2->M0:",
+		mhi_dev_ctxt->counters.m2_m0,
+		"M0->M3:",
+		mhi_dev_ctxt->counters.m0_m3,
+		"M1->M3:",
+		mhi_dev_ctxt->counters.m1_m3,
+		"M3->M0:",
+		mhi_dev_ctxt->counters.m3_m0,
+		"device_wake:",
+		atomic_read(&mhi_dev_ctxt->counters.device_wake),
+		"usage_count:",
+		atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+			    power.usage_count),
+		"outbound_acks:",
+		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
 	if (amnt_copied < count)
 		return amnt_copied - copy_to_user(buf,
 			mhi_dev_ctxt->chan_info, amnt_copied);
 	else
 		return -ENOMEM;
+	return 0;
 }

 static const struct file_operations mhi_dbgfs_state_fops = {

@@ -215,7 +215,7 @@ int mhi_get_max_desc(struct mhi_client_handle *client_handle);
 /* RmNET Reserved APIs, This APIs are reserved for use by the linux network
  * stack only. Use by other clients will introduce system wide issues
  */
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm);
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm);
 int mhi_get_epid(struct mhi_client_handle *mhi_handle);
 struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle);
 void mhi_mask_irq(struct mhi_client_handle *client_handle);