diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 6919a97116ab..6165d30c47a0 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -29,6 +30,7 @@
 #include

 extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;

 enum MHI_DEBUG_LEVEL {
 	MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
 	MHI_STATE_reserved = 0x80000000
 };

+enum MHI_BRSTMODE {
+	/* Default: burst mode enabled for HW channels, disabled for SW */
+	MHI_BRSTMODE_DEFAULT = 0x0,
+	MHI_BRSTMODE_RESERVED = 0x1,
+	MHI_BRSTMODE_DISABLE = 0x2,
+	MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+	MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+	MHI_PM_POR = 0x1, /* Power On Reset State */
+	MHI_PM_M0 = 0x2,
+	MHI_PM_M1 = 0x4,
+	MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+	MHI_PM_M2 = 0x10,
+	MHI_PM_M3_ENTER = 0x20,
+	MHI_PM_M3 = 0x40,
+	MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+						MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+					(pm_state < MHI_PM_M3_EXIT))
+
 struct __packed mhi_event_ctxt {
 	u32 mhi_intmodt;
 	u32 mhi_event_er_type;
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
 };

 struct __packed mhi_chan_ctxt {
-	enum MHI_CHAN_STATE mhi_chan_state;
-	enum MHI_CHAN_DIR mhi_chan_type;
+	u32 chstate : 8;
+	u32 brstmode : 2;
+	u32 pollcfg : 6;
+	u32 reserved : 16;
+	u32 chtype;
 	u32 mhi_event_ring_index;
 	u64 mhi_trb_ring_base_addr;
 	u64 mhi_trb_ring_len;
@@ -172,7 +202,6 @@ enum MHI_PKT_TYPE {
 	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
 	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
 	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
-	MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
 	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
 	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
 	MHI_PKT_TYPE_TX_EVENT = 0x22,
@@ -265,6 +294,11 @@ struct db_mode {
 	/* if set do not reset DB_Mode during M0 resume */
 	u32 preserve_db_state : 1;
 	u32 db_mode : 1;
+	enum MHI_BRSTMODE brstmode;
+	void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+			   void __iomem *io_addr,
+			   uintptr_t chan,
+			   u32 val);
 };

 struct mhi_ring {
@@ -279,6 +313,7 @@ struct mhi_ring {
 	struct db_mode db_mode;
 	u32 msi_disable_cntr;
 	u32 msi_enable_cntr;
+	spinlock_t ring_lock;
 };

 enum MHI_CMD_STATUS {
@@ -336,12 +371,19 @@ struct mhi_chan_info {
 	u32 flags;
 };

+struct mhi_chan_cfg {
+	enum MHI_COMMAND current_cmd;
+	struct mutex chan_lock;
+	spinlock_t event_lock; /* completion event lock */
+	struct completion cmd_complete;
+	struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+	union mhi_cmd_pkt cmd_pkt;
+};
+
 struct mhi_client_handle {
 	struct mhi_chan_info chan_info;
 	struct mhi_device_ctxt *mhi_dev_ctxt;
 	struct mhi_client_info_t client_info;
-	struct completion chan_reset_complete;
-	struct completion chan_open_complete;
 	void *user_data;
 	struct mhi_result result;
 	u32 device_index;
@@ -378,43 +420,27 @@ struct mhi_buf_info {

 struct mhi_counters {
 	u32 m0_m1;
-	u32 m1_m0;
 	u32 m1_m2;
 	u32 m2_m0;
 	u32 m0_m3;
-	u32 m3_m0;
 	u32 m1_m3;
-	u32 mhi_reset_cntr;
-	u32 mhi_ready_cntr;
-	u32 m3_event_timeouts;
-	u32 m0_event_timeouts;
-	u32 m2_event_timeouts;
-	u32 nr_irq_migrations;
-	u32 *msi_counter;
-	u32 *ev_counter;
-	atomic_t outbound_acks;
+	u32 m3_m0;
 	u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
 	u32 bb_used[MHI_MAX_CHANNELS];
+	atomic_t device_wake;
+	atomic_t outbound_acks;
+	atomic_t events_pending;
+	u32 *msi_counter;
+	u32 mhi_reset_cntr;
 };
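The MHI_PM_STATE values above are deliberately one-hot bits rather than a sequential enum, so each *_VALID macro collapses into a single mask test instead of a range compare or a switch. A minimal userspace sketch of the idea (enum values copied from the hunk, but compiled as plain C rather than kernel code):

#include <stdio.h>

/* One-hot PM states, mirroring the MHI_PM_STATE hunk above. */
enum pm_state {
	PM_DISABLE  = 0x0,
	PM_POR      = 0x1,
	PM_M0       = 0x2,
	PM_M1       = 0x4,
	PM_M1_M2    = 0x8,
	PM_M2       = 0x10,
	PM_M3_ENTER = 0x20,
	PM_M3       = 0x40,
	PM_M3_EXIT  = 0x80,
};

/* "Is the state one of X, Y, Z" is a single AND against a mask. */
#define DB_ACCESS_VALID(s)      ((s) & (PM_M0 | PM_M1))
#define WAKE_DB_ACCESS_VALID(s) ((s) & (PM_M0 | PM_M1 | PM_M2))

int main(void)
{
	enum pm_state s;

	for (s = PM_POR; s <= PM_M3_EXIT; s <<= 1)
		printf("state 0x%02x: db %d wake-db %d\n",
		       s, !!DB_ACCESS_VALID(s), !!WAKE_DB_ACCESS_VALID(s));
	return 0;
}

This is also why the patch holds pm_xfer_lock as a reader around the checks: the writer (the PM transition path) flips exactly one bit field, and readers never observe a half-updated state.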

 struct mhi_flags {
 	u32 mhi_initialized;
-	u32 pending_M3;
-	u32 pending_M0;
 	u32 link_up;
-	u32 kill_threads;
-	atomic_t data_pending;
-	atomic_t events_pending;
-	atomic_t pending_resume;
-	atomic_t pending_ssr;
-	atomic_t pending_powerup;
-	atomic_t m2_transition;
 	int stop_threads;
-	atomic_t device_wake;
-	u32 ssr;
+	u32 kill_threads;
 	u32 ev_thread_stopped;
 	u32 st_thread_stopped;
-	u32 uldl_enabled;
 };

 struct mhi_wait_queues {
@@ -458,44 +484,35 @@ struct mhi_dev_space {
 };

 struct mhi_device_ctxt {
-	enum MHI_STATE mhi_state;
+	enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+	enum MHI_STATE mhi_state; /* protocol state */
 	enum MHI_EXEC_ENV dev_exec_env;
 	struct mhi_dev_space dev_space;
 	struct mhi_pcie_dev_info *dev_info;
 	struct pcie_core_info *dev_props;
 	struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
-
 	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
 	struct mhi_ring *mhi_local_event_ctxt;
 	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+	struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
+
-	struct mutex *mhi_chan_mutex;
-	struct mutex mhi_link_state;
-	spinlock_t *mhi_ev_spinlock_list;
-	struct mutex *mhi_cmd_mutex_list;
 	struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
 	struct mhi_event_ring_cfg *ev_ring_props;
 	struct task_struct *event_thread_handle;
 	struct task_struct *st_thread_handle;
+	struct tasklet_struct ev_task; /* Process control Events */
+	struct work_struct process_m1_worker;
 	struct mhi_wait_queues mhi_ev_wq;
 	struct dev_mmio_info mmio_info;
-	u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
-	u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
-	spinlock_t *db_write_lock;
-
 	struct mhi_state_work_queue state_change_work_item_list;
-	enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
-	u32 cmd_ring_order;
 	struct mhi_counters counters;
 	struct mhi_flags flags;
-	u32 device_wake_asserted;
-
-	rwlock_t xfer_lock;
 	struct hrtimer m1_timer;
 	ktime_t m1_timeout;
@@ -508,11 +525,12 @@ struct mhi_device_ctxt {
 	unsigned long esoc_notif;
 	enum STATE_TRANSITION base_state;
-	atomic_t outbound_acks;
+
+	rwlock_t pm_xfer_lock; /* lock to control PM State */
+	spinlock_t dev_wake_lock; /* lock to set wake bit */
 	struct mutex pm_lock;
 	struct wakeup_source w_lock;
-	int enable_lpm;
 	char *chan_info;
 	struct dentry *mhi_parent_folder;
 };
@@ -578,7 +596,8 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       u32 event_ring,
 		       struct mhi_ring *ring,
 		       enum MHI_CHAN_STATE chan_state,
-		       bool preserve_db_state);
+		       bool preserve_db_state,
+		       enum MHI_BRSTMODE brstmode);
 int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
 				   u32 chan);
@@ -623,8 +642,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
 			enum MHI_CB_REASON reason);
 void mhi_notify_client(struct mhi_client_handle *client_handle,
 		       enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+			    bool force_set);
 int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
 			void *hcpu);
@@ -636,6 +656,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
 int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
 			int index);
 int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+			     void __iomem *io_addr,
+			     uintptr_t chan,
+			     u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+				     void __iomem *io_addr,
+				     uintptr_t chan,
+				     u32 val);
 void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 		    void __iomem *io_addr, uintptr_t io_offset, u32 val);
 void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -652,12 +680,19 @@ int mhi_runtime_suspend(struct device *dev);
 int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
 		   struct mhi_chan_info *chan_info);
 int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
 int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
 		  enum MHI_TYPE_EVENT_RING type);
 void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
 		       int index);
 void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+		     enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
 #endif
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 4b63e880a44f..113791a62c38 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
 	size_t amount_copied = 0;
 	uintptr_t align_len = 0x1000;
 	u32 tx_db_val = 0;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+	const long bhi_timeout_ms = 1000;
+	long timeout;

 	if (buf == NULL || 0 == count)
 		return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
 	if (count > BHI_MAX_IMAGE_SIZE)
 		return -ENOMEM;

-	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-				 mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+	timeout = wait_event_interruptible_timeout(
+				*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+				mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+				msecs_to_jiffies(bhi_timeout_ms));
+	if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+		return -EIO;

 	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
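The switch from an unbounded wait_event_interruptible() to the _timeout variant matters because that API has three outcomes: a positive value (condition became true, remaining jiffies), zero (timer expired with the condition still false), or a negative value (interrupted by a signal). The hunk treats "timeout <= 0 and still not in BHI state" as failure, so a late-but-true condition still proceeds. A small sketch modeling just that decision (plain C, not kernel code; -5 stands in for -EIO):

#include <stdio.h>

/* Models the bhi_write() check above: fail only when the wait neither
 * returned success nor left the device in the expected state. */
static int handle_wait_result(long timeout, int state_is_bhi)
{
	if (timeout <= 0 && !state_is_bhi)
		return -5; /* -EIO, as in the patch */
	return 0;
}

int main(void)
{
	printf("%d\n", handle_wait_result(10, 1));   /* woke in time: 0 */
	printf("%d\n", handle_wait_result(0, 0));    /* timed out: -5 */
	printf("%d\n", handle_wait_result(-512, 1)); /* signal, state ok: 0 */
	return 0;
}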
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
 	bhi_ctxt->image_size = count;

 	/* Write the image size */
+	read_lock_bh(pm_xfer_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_bh(pm_xfer_lock);
+		goto bhi_copy_error;
+	}
 	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
 	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
 			    BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
 			    BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
 	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
-
+	read_unlock_bh(pm_xfer_lock);
 	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
 		u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;

+		read_lock_bh(pm_xfer_lock);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_bh(pm_xfer_lock);
+			goto bhi_copy_error;
+		}
 		err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
 		errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
 		errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
 					    BHI_STATUS,
 					    BHI_STATUS_MASK,
 					    BHI_STATUS_SHIFT);
+		read_unlock_bh(pm_xfer_lock);
 		mhi_log(MHI_MSG_CRITICAL,
 			"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
 			tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
 	    || 0 == mhi_pcie_device->core.bar0_end)
 		return -EIO;

-	mhi_log(MHI_MSG_INFO,
-		"Successfully registered char dev. bhi base is: 0x%p.\n",
-		bhi_ctxt->bhi_base);
 	ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
 	if (IS_ERR_VALUE(ret_val)) {
 		mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index aa8500d277cd..784306256d36 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -89,32 +89,31 @@ dt_error:
 int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int r = 0;
+	int i;

 	mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
 					mhi_dev_ctxt->mmio_info.nr_event_rings,
 					GFP_KERNEL);
-
 	if (!mhi_dev_ctxt->mhi_local_event_ctxt)
 		return -ENOMEM;

-	mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
-					mhi_dev_ctxt->mmio_info.nr_event_rings,
-					GFP_KERNEL);
-	if (!mhi_dev_ctxt->counters.ev_counter) {
-		r = -ENOMEM;
-		goto free_local_ec_list;
-	}
 	mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
 					mhi_dev_ctxt->mmio_info.nr_event_rings,
 					GFP_KERNEL);
 	if (!mhi_dev_ctxt->counters.msi_counter) {
 		r = -ENOMEM;
-		goto free_ev_counter;
+		goto free_local_ec_list;
 	}
+
+	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+		struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+						mhi_local_event_ctxt[i];
+
+		spin_lock_init(&mhi_ring->ring_lock);
+	}
+
 	return r;
-free_ev_counter:
-	kfree(mhi_dev_ctxt->counters.ev_counter);
 free_local_ec_list:
 	kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
 	return r;
@@ -129,13 +128,18 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
 	db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
 				event_ring_index,
 				(uintptr_t) event_ctxt->wp);
-	mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
-		       event_ring_index, db_value);
+	event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+				mhi_dev_ctxt->mmio_info.event_db_addr,
+				event_ring_index,
+				db_value);
 }

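Note the pattern in ring_ev_db() above: the burst-mode decision is made once at ring init and cached as a function pointer, so the hot doorbell path is a single indirect call with no per-ring flag checks. A minimal sketch of that dispatch under the same assumptions (plain userspace C; the two callbacks are stand-ins for mhi_process_db_brstmode and mhi_process_db_brstmode_disable):

#include <stdint.h>
#include <stdio.h>

struct dev_ctxt;

/* Models struct db_mode: the ring caches which doorbell routine to use. */
struct ring {
	int db_mode;
	void (*process_db)(struct dev_ctxt *dc, uintptr_t chan, uint32_t val);
};

static void db_brstmode(struct dev_ctxt *dc, uintptr_t chan, uint32_t val)
{
	(void)dc;
	printf("chan %lu: db 0x%x rung only while db_mode is set\n",
	       (unsigned long)chan, val);
}

static void db_brstmode_disable(struct dev_ctxt *dc, uintptr_t chan,
				uint32_t val)
{
	(void)dc;
	printf("chan %lu: db 0x%x written unconditionally\n",
	       (unsigned long)chan, val);
}

static void ring_init(struct ring *r, int brstmode_enabled)
{
	r->db_mode = 1;
	r->process_db = brstmode_enabled ? db_brstmode : db_brstmode_disable;
}

int main(void)
{
	struct ring hw, sw;

	ring_init(&hw, 1);
	ring_init(&sw, 0);
	hw.process_db(NULL, 100, 0x1000); /* one indirect call, no branches */
	sw.process_db(NULL, 12, 0x2000);
	return 0;
}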
 static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
-			       struct mhi_ring *ring, u32 el_per_ring,
-			       u32 intmodt_val, u32 msi_vec)
+			       struct mhi_ring *ring,
+			       u32 el_per_ring,
+			       u32 intmodt_val,
+			       u32 msi_vec,
+			       enum MHI_BRSTMODE brstmode)
 {
 	ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
 	ev_list->mhi_msi_vector = msi_vec;
@@ -144,6 +148,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
 	ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
 	ring->el_size = sizeof(union mhi_event_pkt);
 	ring->overwrite_en = 0;
+
+	ring->db_mode.db_mode = 1;
+	ring->db_mode.brstmode = brstmode;
+	switch (ring->db_mode.brstmode) {
+	case MHI_BRSTMODE_ENABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode;
+		break;
+	case MHI_BRSTMODE_DISABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+		break;
+	default:
+		ring->db_mode.process_db = mhi_process_db;
+	}
+
 	/* Flush writes to MMIO */
 	wmb();
 	return 0;
@@ -159,9 +177,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
 		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
 		mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
 		mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
-				    mhi_dev_ctxt->ev_ring_props[i].nr_desc,
-				    mhi_dev_ctxt->ev_ring_props[i].intmod,
-				    mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+				    mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+				    mhi_dev_ctxt->ev_ring_props[i].intmod,
+				    mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+				    GET_EV_PROPS(EV_BRSTMODE,
+						 mhi_dev_ctxt->
+						 ev_ring_props[i].flags));
 	}
 }

@@ -219,10 +240,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
 	u32 i = 0;
 	unsigned long flags = 0;
 	int ret_val = 0;
-	spinlock_t *lock =
-		&mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
 	struct mhi_ring *event_ctxt =
 		&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+	spinlock_t *lock = &event_ctxt->ring_lock;

 	if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
 		mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index b3fff19d8fa4..395e19c91f35 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
 				"Failed to register with esoc ret %d.\n",
 				ret_val);
 	}
-	mhi_pcie_dev->mhi_ctxt.bus_scale_table =
-		msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
-	mhi_pcie_dev->mhi_ctxt.bus_client =
-		msm_bus_scale_register_client(
-			mhi_pcie_dev->mhi_ctxt.bus_scale_table);
-	if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Could not register for bus control ret: %d.\n",
-			mhi_pcie_dev->mhi_ctxt.bus_client);
-	} else {
-		ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
-		if (ret_val)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Could not set bus frequency ret: %d\n",
-				ret_val);
-	}
 	device_disable_async_suspend(&pcie_device->dev);
 	ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
 			  mhi_dev_ctxt->dev_space.dev_mem_len,
 			  mhi_dev_ctxt->dev_space.dev_mem_start,
 			  mhi_dev_ctxt->dev_space.dma_dev_mem_start);
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
+	kfree(mhi_dev_ctxt->ev_ring_props);
 	mhi_rem_pm_sysfs(&pcie_device->dev);
 sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
 }

 static const struct dev_pm_ops pm_ops = {
-	SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+	SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+			   mhi_runtime_resume,
+			   mhi_runtime_idle)
 	SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
 };

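The pm_ops change above wires a runtime-idle callback into SET_RUNTIME_PM_OPS where NULL used to be. In the runtime-PM core, idle runs when the usage count hits zero and gets a chance to veto or defer the suspend. A loose userspace model of that contract (this is not the real PM core, just the control flow it implies; -16 stands in for -EBUSY):

#include <stdio.h>

struct rpm_dev {
	int usage_count;
	int wake_held;
};

/* Stand-in for mhi_runtime_idle(): veto while device wake is asserted. */
static int runtime_idle(struct rpm_dev *d)
{
	return d->wake_held ? -16 /* -EBUSY */ : 0;
}

static void rpm_put(struct rpm_dev *d)
{
	if (--d->usage_count == 0) {
		if (runtime_idle(d) == 0)
			printf("idle ok: arming autosuspend timer\n");
		else
			printf("idle veto: staying active\n");
	}
}

int main(void)
{
	struct rpm_dev d = { .usage_count = 1, .wake_held = 1 };

	rpm_put(&d);		/* vetoed while wake is held */
	d.usage_count = 1;
	d.wake_held = 0;
	rpm_put(&d);		/* now allowed to autosuspend */
	return 0;
}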
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
 };

 static int mhi_pci_probe(struct pci_dev *pcie_device,
-			 const struct pci_device_id *mhi_device_id)
+			const struct pci_device_id *mhi_device_id)
 {
 	int ret_val = 0;
 	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
 	struct platform_device *plat_dev;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
 	u32 nr_dev = mhi_devices.nr_of_devices;

-	mhi_log(MHI_MSG_INFO, "Entering.\n");
+	mhi_log(MHI_MSG_INFO, "Entering\n");
 	mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
 	if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
 		mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
 	mhi_devices.nr_of_devices++;
 	plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
 	pcie_device->dev.of_node = plat_dev->dev.of_node;
-	pm_runtime_put_noidle(&pcie_device->dev);
+	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+	INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+	mutex_init(&mhi_dev_ctxt->pm_lock);
+	rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+	spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+	tasklet_init(&mhi_dev_ctxt->ev_task,
+		     mhi_ctrl_ev_task,
+		     (unsigned long)mhi_dev_ctxt);
+
+	mhi_dev_ctxt->flags.link_up = 1;
+	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
 	mhi_pcie_dev->pcie_device = pcie_device;
 	mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
 	mhi_pcie_dev->mhi_pci_link_event.events =
-		(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
-		 MSM_PCIE_EVENT_WAKEUP);
+		(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
 	mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
 	mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
 	mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
 	ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
-	if (ret_val)
+	if (ret_val) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failed to register for link notifications %d.\n",
 			ret_val);
+		return ret_val;
+	}
+
+	/* Initialize MHI CNTXT */
+	ret_val = mhi_ctxt_init(mhi_pcie_dev);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"MHI Initialization failed, ret %d\n",
+			ret_val);
+		goto deregister_pcie;
+	}
+	pci_set_master(mhi_pcie_dev->pcie_device);
+
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+	ret_val = set_mhi_base_state(mhi_pcie_dev);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Error Setting MHI Base State %d\n", ret_val);
+		goto unlock_pm_lock;
+	}
+
+	if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+		ret_val = bhi_probe(mhi_pcie_dev);
+		if (ret_val) {
+			mhi_log(MHI_MSG_ERROR,
+				"Error with bhi_probe ret:%d", ret_val);
+			goto unlock_pm_lock;
+		}
+	}
+
+	init_mhi_base_state(mhi_dev_ctxt);
+
+	pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+					 MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+	pm_runtime_use_autosuspend(&pcie_device->dev);
+	pm_suspend_ignore_children(&pcie_device->dev, true);
+
+	/*
+	 * The pci framework will increment the usage count (twice) before
+	 * calling the local device driver probe function.
+	 * 1st: pci.c pci_pm_init() calls pm_runtime_forbid
+	 * 2nd: pci-driver.c local_pci_probe calls pm_runtime_get_sync
+	 * The framework expects the pci device driver to call
+	 * pm_runtime_put_noidle to decrement the usage count after a
+	 * successful probe and to call pm_runtime_allow to enable runtime
+	 * suspend. MHI will allow runtime suspend after entering AMSS state.
+	 */
+	pm_runtime_mark_last_busy(&pcie_device->dev);
+	pm_runtime_put_noidle(&pcie_device->dev);
+
+	/*
+	 * Keep the MHI state in Active (M0) state until AMSS because EP
+	 * would error fatal if we try to enter M1 before entering
+	 * AMSS state.
+	 */
+	read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+	return 0;
+
+unlock_pm_lock:
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+	msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
 	return ret_val;
 }

 static int mhi_plat_probe(struct platform_device *pdev)
 {
 	u32 nr_dev = mhi_devices.nr_of_devices;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
 	int r = 0;

 	mhi_log(MHI_MSG_INFO, "Entered\n");
+	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+	mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!mhi_dev_ctxt->bus_scale_table)
+		return -ENODATA;
+	mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+					(mhi_dev_ctxt->bus_scale_table);
+	if (!mhi_dev_ctxt->bus_client)
+		return -EINVAL;
+
 	mhi_devices.device_list[nr_dev].plat_dev = pdev;
 	r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
 	if (r)
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index 2982bd0831c7..52afc46be6d0 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int i;

-	mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
-					mhi_dev_ctxt->mmio_info.nr_event_rings,
-					GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
-		goto ev_mutex_free;
-	mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
-					MHI_MAX_CHANNELS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
-		goto chan_mutex_free;
-	mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
-					NR_OF_CMD_RINGS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
-		goto cmd_mutex_free;
+	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
+
+		mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+		spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+		spin_lock_init(&ring->ring_lock);
+	}
+
+	for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+		spin_lock_init(&ring->ring_lock);
+	}

-	mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
-					MHI_MAX_CHANNELS, GFP_KERNEL);
-	if (NULL == mhi_dev_ctxt->db_write_lock)
-		goto db_write_lock_free;
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
-	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
-		spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
-	for (i = 0; i < NR_OF_CMD_RINGS; ++i)
-		mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
-	rwlock_init(&mhi_dev_ctxt->xfer_lock);
-	mutex_init(&mhi_dev_ctxt->mhi_link_state);
-	mutex_init(&mhi_dev_ctxt->pm_lock);
-	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
 	return 0;
-
-db_write_lock_free:
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
-	return -ENOMEM;
 }

 size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
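The mhi_init_sync() rewrite above replaces four kmalloc'd parallel lock arrays with locks embedded in the structures they protect, which removes every allocation-failure path from the function. A trivial model of the same design move (plain C with pthreads standing in for spinlock_t):

#include <pthread.h>
#include <stdio.h>

/* Lock lives inside the ring it protects; no parallel array to allocate,
 * index, or free on error. */
struct ring {
	int rp, wp;
	pthread_mutex_t lock;	/* stands in for spinlock_t ring_lock */
};

#define NR_RINGS 4

static struct ring rings[NR_RINGS];

int main(void)
{
	int i;

	/* Init in place, as mhi_init_sync() now does with spin_lock_init. */
	for (i = 0; i < NR_RINGS; i++)
		pthread_mutex_init(&rings[i].lock, NULL);

	pthread_mutex_lock(&rings[2].lock);
	rings[2].wp++;
	pthread_mutex_unlock(&rings[2].lock);
	printf("ring 2 wp=%d\n", rings[2].wp);
	return 0;
}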
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
 	chan_ctxt->mhi_trb_write_ptr = p_base_addr;
 	chan_ctxt->mhi_trb_ring_len = len;
 	/* Prepopulate the channel ctxt */
-	chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+	chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
 	chan_ctxt->mhi_event_ring_index = ev_index;
 }

@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
 	ring[PRIMARY_CMD_RING].len = ring_size;
 	ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
 	ring[PRIMARY_CMD_RING].overwrite_en = 0;
+	ring[PRIMARY_CMD_RING].db_mode.process_db =
+				mhi_process_db_brstmode_disable;
 	return 0;
 }

@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
 	}
 	init_event_ctxt_array(mhi_dev_ctxt);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	mhi_dev_ctxt->enable_lpm = 1;

 	r = mhi_spawn_threads(mhi_dev_ctxt);
 	if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
 			  mhi_dev_ctxt->dev_space.dma_dev_mem_start);
 error_during_dev_mem_init:
 error_during_local_ev_ctxt:
-	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-	kfree(mhi_dev_ctxt->mhi_chan_mutex);
-	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
 error_during_sync:
 	kfree(mhi_dev_ctxt->ev_ring_props);
 error_during_props:
@@ -601,10 +574,12 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 		       u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
 		       u32 event_ring, struct mhi_ring *ring,
 		       enum MHI_CHAN_STATE chan_state,
-		       bool preserve_db_state)
+		       bool preserve_db_state,
+		       enum MHI_BRSTMODE brstmode)
 {
-	cc_list->mhi_chan_state = chan_state;
-	cc_list->mhi_chan_type = chan_type;
+	cc_list->brstmode = brstmode;
+	cc_list->chstate = chan_state;
+	cc_list->chtype = chan_type;
 	cc_list->mhi_event_ring_index = event_ring;
 	cc_list->mhi_trb_ring_base_addr = trb_list_phy;
 	cc_list->mhi_trb_ring_len =
@@ -621,6 +596,19 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
 	ring->dir = chan_type;
 	ring->db_mode.db_mode = 1;
 	ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+	ring->db_mode.brstmode = brstmode;
+
+	switch (ring->db_mode.brstmode) {
+	case MHI_BRSTMODE_ENABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode;
+		break;
+	case MHI_BRSTMODE_DISABLE:
+		ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+		break;
+	default:
+		ring->db_mode.process_db = mhi_process_db;
+	}
+
 	/* Flush writes to MMIO */
 	wmb();
 	return 0;
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 04778a360289..7a4c560d2f42 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -15,57 +15,6 @@
 #include "mhi_sys.h"
 #include "mhi_trace.h"

-irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
-{
-	struct device *mhi_device = dev_id;
-	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-
-	if (!mhi_dev_ctxt) {
-		mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
-		return IRQ_HANDLED;
-	}
-	mhi_dev_ctxt->counters.msi_counter[
-			IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
-	mhi_log(MHI_MSG_VERBOSE,
-		"Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-	trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-	atomic_inc(&mhi_dev_ctxt->flags.events_pending);
-	wake_up_interruptible(
-		mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
-	return IRQ_HANDLED;
-}
-
-irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
-{
-	struct device *mhi_device = dev_id;
-	u32 client_index;
-	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-	struct mhi_client_handle *client_handle;
-	struct mhi_client_info_t *client_info;
-	struct mhi_cb_info cb_info;
-	int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
-	mhi_dev_ctxt->counters.msi_counter[msi_num]++;
-	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
-	trace_mhi_msi(msi_num);
-	client_index = MHI_MAX_CHANNELS -
-		(mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
-	client_handle = mhi_dev_ctxt->client_handle_list[client_index];
-	client_info = &client_handle->client_info;
-	if (likely(NULL != client_handle)) {
-		client_handle->result.user_data =
-			client_handle->user_data;
-		if (likely(NULL != &client_info->mhi_client_cb)) {
-			cb_info.result = &client_handle->result;
-			cb_info.cb_reason = MHI_CB_XFER;
-			cb_info.chan = client_handle->chan_info.chan_nr;
-			cb_info.result->transaction_status = 0;
-			client_info->mhi_client_cb(&cb_info);
-		}
-	}
-	return IRQ_HANDLED;
-}
-
 static int mhi_process_event_ring(
 		struct mhi_device_ctxt *mhi_dev_ctxt,
 		u32 ev_index,
@@ -76,12 +25,17 @@ static int mhi_process_event_ring(
 	union mhi_event_pkt event_to_process;
 	int ret_val = 0;
 	struct mhi_event_ctxt *ev_ctxt = NULL;
-	union mhi_cmd_pkt *cmd_pkt = NULL;
-	union mhi_event_pkt *ev_ptr = NULL;
 	struct mhi_ring *local_ev_ctxt =
 		&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
-	u32 event_code;

+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+		mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		return -EIO;
+	}
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);

 	ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
 	device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
@@ -96,59 +50,84 @@ static int mhi_process_event_ring(
 	while ((local_rp != device_rp) && (event_quota > 0) &&
 	       (device_rp != NULL) && (local_rp != NULL)) {
+
 		event_to_process = *local_rp;
-		ev_ptr = &event_to_process;
-		event_code = get_cmd_pkt(mhi_dev_ctxt,
-					 ev_ptr, &cmd_pkt, ev_index);
-		if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
-		      MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
-		    (event_code == MHI_EVENT_CC_SUCCESS)) {
-			mhi_log(MHI_MSG_INFO, "Command Completion event\n");
-			if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
-			     MHI_PKT_TYPE_RESET_CHAN_CMD)) {
-				mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
-				MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
-					MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
-				ret_val = -EINPROGRESS;
-				break;
-			} else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
-				    == MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
-				MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
-					MHI_PKT_TYPE_RESET_CHAN_CMD);
-				mhi_log(MHI_MSG_INFO,
-					"Processing Reset CC event\n");
-			}
-		}
-		if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
-					local_ev_ctxt,
-					MHI_RING_TYPE_EVENT_RING,
-					ev_index)))
-			mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
-		switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
+		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		recycle_trb_and_ring(mhi_dev_ctxt,
+				     local_ev_ctxt,
+				     MHI_RING_TYPE_EVENT_RING,
+				     ev_index);
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+		switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
 		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
-			mhi_log(MHI_MSG_INFO,
-				"MHI CCE received ring 0x%x\n",
-				ev_index);
+		{
+			union mhi_cmd_pkt *cmd_pkt;
+			u32 chan;
+			struct mhi_chan_cfg *cfg;
+			unsigned long flags;
+			struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+				mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+
 			__pm_stay_awake(&mhi_dev_ctxt->w_lock);
 			__pm_relax(&mhi_dev_ctxt->w_lock);
-			ret_val = parse_cmd_event(mhi_dev_ctxt,
-						  &event_to_process, ev_index);
+			get_cmd_pkt(mhi_dev_ctxt,
+				    &event_to_process,
+				    &cmd_pkt, ev_index);
+			MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+			cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+			mhi_log(MHI_MSG_INFO,
+				"MHI CCE received ring 0x%x chan:%u\n",
+				ev_index,
+				chan);
+			spin_lock_irqsave(&cfg->event_lock, flags);
+			cfg->cmd_pkt = *cmd_pkt;
+			cfg->cmd_event_pkt =
+				event_to_process.cmd_complete_event_pkt;
+			complete(&cfg->cmd_complete);
+			spin_unlock_irqrestore(&cfg->event_lock, flags);
+			spin_lock_irqsave(&cmd_ring->ring_lock,
+					  flags);
+			ctxt_del_element(cmd_ring, NULL);
+			spin_unlock_irqrestore(&cmd_ring->ring_lock,
+					       flags);
 			break;
+		}
 		case MHI_PKT_TYPE_TX_EVENT:
 			__pm_stay_awake(&mhi_dev_ctxt->w_lock);
 			parse_xfer_event(mhi_dev_ctxt,
-					 &event_to_process, ev_index);
+					 &event_to_process,
+					 ev_index);
 			__pm_relax(&mhi_dev_ctxt->w_lock);
 			break;
 		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
 		{
 			enum STATE_TRANSITION new_state;
-
+			unsigned long flags;
 			new_state = MHI_READ_STATE(&event_to_process);
 			mhi_log(MHI_MSG_INFO,
-				"MHI STE received ring 0x%x\n",
-				ev_index);
-			mhi_init_state_transition(mhi_dev_ctxt, new_state);
+				"MHI STE received ring 0x%x State:%s\n",
+				ev_index,
+				state_transition_str(new_state));
+
+			/* If transitioning to M1 schedule worker thread */
+			if (new_state == STATE_TRANSITION_M1) {
+				write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+						   flags);
+				mhi_dev_ctxt->mhi_state =
+					mhi_get_m_state(mhi_dev_ctxt);
+				if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
+					mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
+					mhi_dev_ctxt->counters.m0_m1++;
+					schedule_work(&mhi_dev_ctxt->
+						      process_m1_worker);
+				}
+				write_unlock_irqrestore(&mhi_dev_ctxt->
+							pm_xfer_lock,
+							flags);
+			} else {
+				mhi_init_state_transition(mhi_dev_ctxt,
+							  new_state);
+			}
 			break;
 		}
 		case MHI_PKT_TYPE_EE_EVENT:
@@ -178,10 +157,7 @@ static int mhi_process_event_ring(
 			mhi_log(MHI_MSG_INFO,
 				"MHI System Error Detected. Triggering Reset\n");
 			BUG();
-			if (!mhi_trigger_reset(mhi_dev_ctxt))
-				mhi_log(MHI_MSG_ERROR,
-					"Failed to reset for SYSERR recovery\n");
-			break;
+			break;
 		default:
 			mhi_log(MHI_MSG_ERROR,
 				"Unsupported packet type code 0x%x\n",
@@ -198,6 +174,9 @@ static int mhi_process_event_ring(
 		ret_val = 0;
 		--event_quota;
 	}
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	return ret_val;
 }

@@ -207,7 +186,7 @@ int parse_event_thread(void *ctxt)
 	u32 i = 0;
 	int ret_val = 0;
 	int ret_val_process_event = 0;
-	atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
+	atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;

 	/* Go through all event rings */
 	for (;;) {
@@ -215,7 +194,7 @@ int parse_event_thread(void *ctxt)
 		wait_event_interruptible(
 			*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
 			((atomic_read(
-				&mhi_dev_ctxt->flags.events_pending) > 0) &&
+				&mhi_dev_ctxt->counters.events_pending) > 0) &&
 			 !mhi_dev_ctxt->flags.stop_threads) ||
 			mhi_dev_ctxt->flags.kill_threads ||
 			(mhi_dev_ctxt->flags.stop_threads &&
@@ -237,27 +216,45 @@ int parse_event_thread(void *ctxt)
 			break;
 		}
 		mhi_dev_ctxt->flags.ev_thread_stopped = 0;
-		atomic_dec(&mhi_dev_ctxt->flags.events_pending);
-		for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+		atomic_dec(&mhi_dev_ctxt->counters.events_pending);
+		for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
 			if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
 				mhi_log(MHI_MSG_INFO,
-					"SYS_ERR detected, not processing events\n");
-				atomic_set(&mhi_dev_ctxt->flags.events_pending,
+					"SYS_ERR detected, not processing events\n");
+				atomic_set(&mhi_dev_ctxt->
+					   counters.events_pending,
 					   0);
 				break;
 			}
 			if (GET_EV_PROPS(EV_MANAGED,
-					 mhi_dev_ctxt->ev_ring_props[i].flags)){
+					 mhi_dev_ctxt->ev_ring_props[i].flags)) {
 				ret_val_process_event =
-					mhi_process_event_ring(mhi_dev_ctxt, i,
-					mhi_dev_ctxt->ev_ring_props[i].nr_desc);
-				if (ret_val_process_event ==
-				    -EINPROGRESS)
+					mhi_process_event_ring(mhi_dev_ctxt,
+					i,
+					mhi_dev_ctxt->
+					ev_ring_props[i].nr_desc);
+				if (ret_val_process_event == -EINPROGRESS)
 					atomic_inc(ev_pen_ptr);
 			}
 		}
 	}
-	return ret_val;
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+	struct mhi_device_ctxt *mhi_dev_ctxt =
+		(struct mhi_device_ctxt *)data;
+	const unsigned CTRL_EV_RING = 0;
+	struct mhi_event_ring_cfg *ring_props =
+		&mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+
+	mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+	/* Process control event ring */
+	mhi_process_event_ring(mhi_dev_ctxt,
+			       CTRL_EV_RING,
+			       ring_props->nr_desc);
+	enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
+	mhi_log(MHI_MSG_VERBOSE, "Exit\n");
 }

 struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@@ -268,8 +265,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
 	client_handle->result.bytes_xferd = 0;
 	client_handle->result.transaction_status = 0;
 	ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
-					 client_handle->event_ring_index,
-					 1);
+					 client_handle->event_ring_index,
+					 1);
 	if (ret_val)
 		mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
 	return &(client_handle->result);
@@ -296,3 +293,60 @@ void mhi_unmask_irq(struct mhi_client_handle *client_handle)
 	ev_ring->msi_enable_cntr++;
 	enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
 }
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+	struct device *mhi_device = dev_id;
+	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+	int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+
+	if (!mhi_dev_ctxt) {
+		mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+		return IRQ_HANDLED;
+	}
+	mhi_dev_ctxt->counters.msi_counter[
+			IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
+	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+	trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+	if (msi) {
+		atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+	} else {
+		disable_irq_nosync(irq_number);
+		tasklet_schedule(&mhi_dev_ctxt->ev_task);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+	struct device *mhi_device = dev_id;
+	u32 client_index;
+	struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+	struct mhi_client_handle *client_handle;
+	struct mhi_client_info_t *client_info;
+	struct mhi_cb_info cb_info;
+	int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+	mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+	trace_mhi_msi(msi_num);
+	client_index = MHI_MAX_CHANNELS -
+		(mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+	client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+	client_info = &client_handle->client_info;
+	if (likely(client_handle)) {
+		client_handle->result.user_data =
+			client_handle->user_data;
+		if (likely(client_info->mhi_client_cb)) {
+			cb_info.result = &client_handle->result;
+			cb_info.cb_reason = MHI_CB_XFER;
+			cb_info.chan = client_handle->chan_info.chan_nr;
+			cb_info.result->transaction_status = 0;
+			client_info->mhi_client_cb(&cb_info);
+		}
+	}
+	return IRQ_HANDLED;
+}
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index 6f9ed293e6dd..133c0eeb034e 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -247,9 +247,17 @@
 #define MHI_PRESERVE_DB_STATE__MASK (1)
 #define MHI_PRESERVE_DB_STATE__SHIFT (8)

+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
 #define GET_CHAN_PROPS(_FIELD, _VAL) \
 	(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)

+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
 #define EV_TYPE
 #define MHI_EV_TYPE__MASK (3)
 #define MHI_EV_TYPE__SHIFT (3)
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 6da133ce8e04..a873ea9055fc 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -29,6 +29,9 @@
 #include "mhi_macros.h"
 #include "mhi_trace.h"

+static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+			  union mhi_cmd_pkt *cmd_pkt);
+
 static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
 {
 	bb_ctxt->el_size = sizeof(struct mhi_buf_info);
@@ -212,7 +215,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
 			  ring->len, ring->base,
 			  cc_list->mhi_trb_ring_base_addr);
 	mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
-			   MHI_CHAN_STATE_DISABLED, false);
+			   MHI_CHAN_STATE_DISABLED,
+			   false,
+			   MHI_BRSTMODE_DEFAULT);
 	return 0;
 }

@@ -261,7 +266,9 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
 			   &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
 			   MHI_CHAN_STATE_ENABLED,
 			   GET_CHAN_PROPS(PRESERVE_DB_STATE,
-					  client_handle->chan_info.flags));
+					  client_handle->chan_info.flags),
+			   GET_CHAN_PROPS(BRSTMODE,
+					  client_handle->chan_info.flags));
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
 }

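The new BRSTMODE fields ride in the per-channel and per-event-ring flags words, extracted with the mask/shift pairs added to mhi_macros.h above (BRSTMODE at bits 10:9 of the channel flags, EV_BRSTMODE at bits 6:5 of the event flags). A standalone sketch of the extraction; note the real GET_CHAN_PROPS/GET_EV_PROPS macros use token pasting on the field name, which is simplified away here:

#include <stdio.h>

#define MHI_BRSTMODE__MASK     (3)
#define MHI_BRSTMODE__SHIFT    (9)
#define MHI_EV_BRSTMODE__MASK  (3)
#define MHI_EV_BRSTMODE__SHIFT (5)

/* Simplified stand-in for GET_CHAN_PROPS(_FIELD, _VAL). */
#define GET_BITS(mask, shift, v) (((v) >> (shift)) & (mask))

int main(void)
{
	unsigned int chan_flags = 3u << MHI_BRSTMODE__SHIFT;   /* ENABLE */
	unsigned int ev_flags = 2u << MHI_EV_BRSTMODE__SHIFT;  /* DISABLE */

	printf("chan brstmode %u\n",
	       GET_BITS(MHI_BRSTMODE__MASK, MHI_BRSTMODE__SHIFT, chan_flags));
	printf("event brstmode %u\n",
	       GET_BITS(MHI_EV_BRSTMODE__MASK, MHI_EV_BRSTMODE__SHIFT,
			ev_flags));
	return 0;
}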
@@ -270,48 +277,58 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
 {
 	int ret_val = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt;
-	int r = 0;
 	int chan;
+	struct mhi_chan_cfg *cfg;
+	struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+	union mhi_cmd_pkt cmd_pkt;
+	enum MHI_EVENT_CCS ev_code;

-	if (NULL == client_handle ||
-	    client_handle->magic != MHI_HANDLE_MAGIC)
+	if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
 		return -EINVAL;

 	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
-	r = get_chan_props(mhi_dev_ctxt,
-			   client_handle->chan_info.chan_nr,
-			   &client_handle->chan_info);
-	if (r)
-		return r;
+	ret_val = get_chan_props(mhi_dev_ctxt,
+				 client_handle->chan_info.chan_nr,
+				 &client_handle->chan_info);
+	if (ret_val)
+		return ret_val;

 	chan = client_handle->chan_info.chan_nr;
+	cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+	mutex_lock(&cfg->chan_lock);
 	mhi_log(MHI_MSG_INFO,
 		"Entered: Client opening chan 0x%x\n", chan);
 	if (mhi_dev_ctxt->dev_exec_env <
 	    GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
-			   client_handle->chan_info.flags)) {
+			   client_handle->chan_info.flags)) {
 		mhi_log(MHI_MSG_INFO,
 			"Chan %d, MHI exec_env %d, not ready!\n",
-			chan, mhi_dev_ctxt->dev_exec_env);
+			chan,
+			mhi_dev_ctxt->dev_exec_env);
+		mutex_unlock(&cfg->chan_lock);
 		return -ENOTCONN;
 	}

-	r = populate_tre_ring(client_handle);
-	if (r) {
+	ret_val = populate_tre_ring(client_handle);
+	if (ret_val) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failed to initialize tre ring chan %d ret %d\n",
-			chan, r);
-		return r;
+			chan,
+			ret_val);
+		mutex_unlock(&cfg->chan_lock);
+		return ret_val;
 	}
 	client_handle->event_ring_index =
-		mhi_dev_ctxt->dev_space.ring_ctxt.
-				cc_list[chan].mhi_event_ring_index;
-	r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
-			   client_handle->chan_info.max_desc);
-	if (r) {
+		mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
+						mhi_event_ring_index;
+	ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+				 client_handle->chan_info.max_desc);
+	if (ret_val) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failed to initialize bb ctxt chan %d ret %d\n",
-			chan, r);
-		return r;
+			chan,
+			ret_val);
+		mutex_unlock(&cfg->chan_lock);
+		return ret_val;
 	}

 	client_handle->msi_vec =
@@ -321,16 +338,67 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
 		mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
 				client_handle->event_ring_index].mhi_intmodt;

-	init_completion(&client_handle->chan_open_complete);
-	ret_val = start_chan_sync(client_handle);
-
-	if (0 != ret_val)
+	init_completion(&cfg->cmd_complete);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
 		mhi_log(MHI_MSG_ERROR,
-			"Failed to start chan 0x%x, ret %d\n", chan, ret_val);
-	BUG_ON(ret_val);
+			"MHI State is disabled\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&cfg->chan_lock);
+		return -EIO;
+	}
+	WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+
+	ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+			       MHI_COMMAND_START_CHAN,
+			       chan);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to send start cmd for chan %d ret %d\n",
+			chan, ret_val);
+		goto error_completion;
+	}
+	ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
+				msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
+	if (!ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to receive cmd completion for %d\n",
+			chan);
+		goto error_completion;
+	} else {
+		ret_val = 0;
+	}
+
+	spin_lock(&cfg->event_lock);
+	cmd_event_pkt = cfg->cmd_event_pkt;
+	cmd_pkt = cfg->cmd_pkt;
+	spin_unlock(&cfg->event_lock);
+
+	ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+				   ((union mhi_event_pkt *)&cmd_event_pkt));
+	if (ev_code != MHI_EVENT_CC_SUCCESS) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to receive event completion ev_code:0x%x\n",
+			ev_code);
+		ret_val = -EIO;
+		goto error_completion;
+	}
+	client_handle->chan_status = 1;
+
+error_completion:
+
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	mutex_unlock(&cfg->chan_lock);
+
 	mhi_log(MHI_MSG_INFO,
-		"Exited chan 0x%x\n", chan);
+		"Exited chan 0x%x ret:%d\n", chan, ret_val);
 	return ret_val;
 }
 EXPORT_SYMBOL(mhi_open_channel);
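mhi_open_channel() above (and mhi_close_channel() below) now share one synchronization shape: re-init a per-channel completion under the channel mutex, send the command, block with a timeout, then copy the completion packet out under the event spinlock; the event-ring side fills cfg->cmd_event_pkt and calls complete(). A userspace model of that round trip, with pthread primitives standing in for struct completion (assumes POSIX threads; timeouts shortened):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;
static int ev_code;

/* Plays the role of the CMD_COMPLETION_EVENT case in the event ring. */
static void *event_ring_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	ev_code = 1;		/* MHI_EVENT_CC_SUCCESS stand-in */
	done = 1;		/* complete(&cfg->cmd_complete) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 2;		/* MHI_MAX_CMD_TIMEOUT stand-in */

	pthread_create(&t, NULL, event_ring_side, NULL);
	pthread_mutex_lock(&lock);
	while (!done && ret == 0)	/* wait_for_completion_timeout() */
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	if (!done)
		printf("timed out waiting for command completion\n");
	else
		printf("completion ev_code=%d\n", ev_code);
	return 0;
}

The per-channel mutex serializes callers, so one in-flight command per channel replaces the old global MHI_CMD_PENDING bookkeeping.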
@@ -389,46 +457,79 @@ EXPORT_SYMBOL(mhi_register_channel);
 void mhi_close_channel(struct mhi_client_handle *client_handle)
 {
 	u32 chan;
-	int r = 0;
 	int ret_val = 0;
+	struct mhi_chan_cfg *cfg;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+	struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+	union mhi_cmd_pkt cmd_pkt;
+	enum MHI_EVENT_CCS ev_code;

 	if (!client_handle ||
 	    client_handle->magic != MHI_HANDLE_MAGIC ||
 	    !client_handle->chan_status)
 		return;

+	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
 	chan = client_handle->chan_info.chan_nr;
+	cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
 	mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
-	init_completion(&client_handle->chan_reset_complete);
-	if (!atomic_read(&client_handle->mhi_dev_ctxt->flags.pending_ssr)) {
-		ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
-				       MHI_COMMAND_RESET_CHAN, chan);
-		if (ret_val != 0) {
-			mhi_log(MHI_MSG_ERROR,
-				"Failed to send reset cmd for chan %d ret %d\n",
-				chan, ret_val);
-		}
-		r = wait_for_completion_timeout(
-				&client_handle->chan_reset_complete,
+	mutex_lock(&cfg->chan_lock);
+	init_completion(&cfg->cmd_complete);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+			       MHI_COMMAND_RESET_CHAN,
+			       chan);
+	if (ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to send reset cmd for chan %d ret %d\n",
+			chan,
+			ret_val);
+		goto error_completion;
+	}
+	ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
 				msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
-		if (!r)
-			mhi_log(MHI_MSG_ERROR,
-				"Failed to reset chan %d ret %d\n",
-				chan, r);
-	} else {
-		/*
-		 * Assumption: Device is not playing with our
-		 * buffers after BEFORE_SHUTDOWN
-		 */
-		mhi_log(MHI_MSG_INFO,
-			"Pending SSR local free only chan %d.\n", chan);
+	if (!ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to receive cmd completion for %d\n",
+			chan);
+		goto error_completion;
 	}

+	spin_lock_irq(&cfg->event_lock);
+	cmd_event_pkt = cfg->cmd_event_pkt;
+	cmd_pkt = cfg->cmd_pkt;
+	spin_unlock_irq(&cfg->event_lock);
+	ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+				   ((union mhi_event_pkt *)&cmd_event_pkt));
+	if (ev_code != MHI_EVENT_CC_SUCCESS) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to receive event completion ev_code:0x%x\n",
+			ev_code);
+		goto error_completion;
+	}
+
+	ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
+	if (ret_val)
+		mhi_log(MHI_MSG_ERROR,
+			"Error resetting cmd ret:%d\n",
+			ret_val);
+
+error_completion:
+
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
 	free_tre_ring(client_handle);
 	mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
 	client_handle->chan_status = 0;
+	mutex_unlock(&cfg->chan_lock);
 }
 EXPORT_SYMBOL(mhi_close_channel);

@@ -441,93 +542,47 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 	chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 	db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING,
 				chan, (uintptr_t) chan_ctxt->wp);
-	mhi_dev_ctxt->mhi_chan_db_order[chan]++;
-	mhi_process_db(mhi_dev_ctxt,
-		       mhi_dev_ctxt->mmio_info.chan_db_addr,
-		       chan, db_value);
-}
+	chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
+				mhi_dev_ctxt->mmio_info.chan_db_addr,
+				chan,
+				db_value);

-int mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-	int ret_val = 0;
-
-	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
-		mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
-			(atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
-		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
-			if (mhi_dev_ctxt->flags.link_up) {
-				mhi_assert_device_wake(mhi_dev_ctxt);
-				ret_val = -ENOTCONN;
-			}
-		} else {
-			mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
-			ret_val = -ENOTCONN;
-		}
-	} else {
-		ret_val = 0;
-	}
-
-	return ret_val;
 }

 static inline int mhi_queue_tre(struct mhi_device_ctxt
-							*mhi_dev_ctxt,
-				u32 chan,
-				enum MHI_RING_TYPE type)
+				*mhi_dev_ctxt,
+				u32 chan,
+				enum MHI_RING_TYPE type)
 {
 	struct mhi_chan_ctxt *chan_ctxt;
 	unsigned long flags = 0;
-	int ret_val = 0;
 	u64 db_value = 0;

 	chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
-	mhi_dev_ctxt->counters.m1_m0++;
-	if (type == MHI_RING_TYPE_CMD_RING)
-		atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
+	if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+		return -EACCES;

-	ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
-	if (likely(((ret_val == 0) &&
-	    (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
-	      (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
-	    (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
-	    (!mhi_dev_ctxt->flags.pending_M3))) {
-		if (likely(type == MHI_RING_TYPE_XFER_RING)) {
-			spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
-					  flags);
-			db_value =
-			    mhi_v2p_addr(
-				mhi_dev_ctxt,
-				MHI_RING_TYPE_XFER_RING,
-				chan,
-				(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-			mhi_dev_ctxt->mhi_chan_db_order[chan]++;
-			mhi_update_chan_db(mhi_dev_ctxt, chan);
-			spin_unlock_irqrestore(
-				&mhi_dev_ctxt->db_write_lock[chan], flags);
-		} else if (type == MHI_RING_TYPE_CMD_RING) {
-			db_value = mhi_v2p_addr(mhi_dev_ctxt,
-					MHI_RING_TYPE_CMD_RING,
-					PRIMARY_CMD_RING,
-					(uintptr_t)
-				mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING].wp);
-			mhi_dev_ctxt->cmd_ring_order++;
-			mhi_process_db(mhi_dev_ctxt,
-				       mhi_dev_ctxt->mmio_info.cmd_db_addr,
-				       0, db_value);
-		} else {
-			mhi_log(MHI_MSG_VERBOSE,
-				"Wrong type of packet = %d\n", type);
-			ret_val = -EPROTO;
-		}
+	if (likely(type == MHI_RING_TYPE_XFER_RING)) {
+		struct mhi_ring *mhi_ring =
+			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+		spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+		mhi_update_chan_db(mhi_dev_ctxt, chan);
+		spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 	} else {
-		mhi_log(MHI_MSG_VERBOSE,
-			"Wakeup, pending data state %s chan state %d\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			chan_ctxt->mhi_chan_state);
-		ret_val = 0;
+		struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+			mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+
+		db_value = mhi_v2p_addr(mhi_dev_ctxt,
+					MHI_RING_TYPE_CMD_RING,
+					PRIMARY_CMD_RING,
+					(uintptr_t)cmd_ring->wp);
+		cmd_ring->db_mode.process_db
+			(mhi_dev_ctxt,
+			 mhi_dev_ctxt->mmio_info.cmd_db_addr,
+			 0,
+			 db_value);
 	}
-	return ret_val;
+	return 0;
 }

 static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
 		     int chan, void *buf, size_t buf_len,
@@ -647,19 +702,12 @@ static int mhi_queue_dma_xfer(
 	int ret_val;
 	enum MHI_CLIENT_CHANNEL chan;
 	struct mhi_device_ctxt *mhi_dev_ctxt;
-	unsigned long flags;
-
-	if (!client_handle || !buf || !buf_len)
-		return -EINVAL;

 	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
 	MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
 		   "Client buffer is of invalid length\n");
 	chan = client_handle->chan_info.chan_nr;

-	mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
-	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-
 	pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
 	pkt_loc->data_tx_pkt.buffer_ptr = buf;
 	pkt_loc->type.info = mhi_flags;
@@ -685,25 +733,14 @@ static int mhi_queue_dma_xfer(
 			       (void *)&pkt_loc);
 	if (unlikely(0 != ret_val)) {
 		mhi_log(MHI_MSG_VERBOSE,
-			"Failed to insert trb in xfer ring\n");
-		goto error;
+			"Failed to insert trb in xfer ring\n");
+		return ret_val;
 	}

-	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
+
 	if (MHI_OUT ==
 	    GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
 		atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
-	ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
-	if (unlikely(ret_val))
-		mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-error:
-	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-
-	mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
-	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);

 	return ret_val;
 }
@@ -713,10 +750,28 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
 	int r;
 	enum dma_data_direction dma_dir;
 	struct mhi_buf_info *bb;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+	u32 chan;
+	unsigned long flags;

 	if (!client_handle || !buf || !buf_len)
 		return -EINVAL;

+	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+	chan = client_handle->chan_info.chan_nr;
+
+	read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+	if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+		read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+		mhi_log(MHI_MSG_ERROR,
+			"MHI is not in active state\n");
+		return -EINVAL;
+	}
+
+	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
 	if (MHI_OUT ==
 	    GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
 		dma_dir = DMA_TO_DEVICE;
 	else
@@ -727,8 +782,16 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
 		      buf, buf_len, dma_dir, &bb);
 	if (r) {
 		mhi_log(MHI_MSG_VERBOSE,
-			"Failed to create BB, chan %d ret %d\n",
-			client_handle->chan_info.chan_nr, r);
+			"Failed to create BB, chan %d ret %d\n",
+			chan,
+			r);
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+					  dev_info->pcie_device->dev);
+		pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
+				      pcie_device->dev);
+		read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+		read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
 		return r;
 	}

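The hunks below finish the new bracketing on the transfer path: every submission takes a runtime-PM reference and asserts device wake under the pm_xfer_lock read lock; inbound (DMA_FROM_DEVICE) submissions drop both right after ringing the doorbell, while outbound ones hold them until the TX acknowledgment in parse_outbound() releases them. A counting model of that pairing (plain C, no kernel APIs):

#include <stdio.h>

enum dir { TO_DEVICE, FROM_DEVICE };

static int usage_count;	/* models the runtime-PM usage count */
static int wake_count;	/* models counters.device_wake */

static void get_refs(void) { usage_count++; wake_count++; }
static void put_refs(void) { usage_count--; wake_count--; }

static void queue_xfer(enum dir d)
{
	get_refs();	/* pm_runtime_get() + mhi_assert_device_wake() */
	printf("queued %s: usage=%d wake=%d\n",
	       d == TO_DEVICE ? "out" : "in", usage_count, wake_count);
	if (d == FROM_DEVICE)
		put_refs();	/* nothing to ack; release immediately */
}

static void outbound_ack(void)
{
	put_refs();	/* parse_outbound() drops the references */
}

int main(void)
{
	queue_xfer(FROM_DEVICE);
	queue_xfer(TO_DEVICE);
	outbound_ack();
	printf("final: usage=%d wake=%d\n", usage_count, wake_count);
	return 0;
}

If the counts do not return to zero, the link can never autosuspend, which is why each error path in mhi_queue_xfer() undoes both before returning.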
client_handle->chan_info.chan_nr); r = mhi_queue_dma_xfer(client_handle, - bb->bb_p_addr, - bb->buf_len, - mhi_flags); + bb->bb_p_addr, + bb->buf_len, + mhi_flags); /* * Assumption: If create_bounce_buffer did not fail, we do not @@ -747,25 +810,37 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle, * out of sync with the descriptor list which is problematic. */ BUG_ON(r); - return r; + + read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags); + mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING); + if (dma_dir == DMA_FROM_DEVICE) { + pm_runtime_mark_last_busy(&mhi_dev_ctxt-> + dev_info->pcie_device->dev); + pm_runtime_put_noidle(&mhi_dev_ctxt-> + dev_info->pcie_device->dev); + mhi_deassert_device_wake(mhi_dev_ctxt); + } + read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags); + return 0; } EXPORT_SYMBOL(mhi_queue_xfer); int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_COMMAND cmd, u32 chan) { - unsigned long flags = 0; union mhi_cmd_pkt *cmd_pkt = NULL; enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED; enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED; enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD; - struct mutex *chan_mutex = NULL; int ret_val = 0; + unsigned long flags, flags2; + struct mhi_ring *mhi_ring = &mhi_dev_ctxt-> + mhi_local_cmd_ctxt[PRIMARY_CMD_RING]; - if (chan >= MHI_MAX_CHANNELS || - cmd >= MHI_COMMAND_MAX_NR || mhi_dev_ctxt == NULL) { + if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) { mhi_log(MHI_MSG_ERROR, - "Invalid channel id, received id: 0x%x", chan); + "Invalid channel id, received id: 0x%x", + chan); return -EINVAL; } @@ -773,21 +848,9 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, "Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n", TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), mhi_dev_ctxt->dev_exec_env, chan, cmd); - mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan); - pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev); - /* - * If there is a cmd pending a device confirmation, - * do not send anymore for this channel - */ - if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) { - mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan); - ret_val = -EALREADY; - goto error_invalid; - } - atomic_inc(&mhi_dev_ctxt->flags.data_pending); from_state = - mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state; + mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].chstate; switch (cmd) { break; @@ -815,34 +878,26 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, mhi_log(MHI_MSG_ERROR, "Bad command received\n"); } - mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]); - ret_val = ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, - (void *)&cmd_pkt); + spin_lock_irqsave(&mhi_ring->ring_lock, flags); + ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt); if (ret_val) { mhi_log(MHI_MSG_ERROR, "Failed to insert element\n"); - goto error_general; + spin_unlock_irqrestore(&mhi_ring->ring_lock, flags); + return ret_val; } - chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan]; - mutex_lock(chan_mutex); + MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type); MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan); - mutex_unlock(chan_mutex); - mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING; - - read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags2); mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING); - read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + 
+	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
+	spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 
-	mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
-		cmd, chan);
-error_general:
-	mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
+	mhi_log(MHI_MSG_VERBOSE,
+		"Sent command 0x%x for chan %d\n",
+		cmd,
+		chan);
 
 error_invalid:
-	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
-	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
 	mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
 	return ret_val;
 }
@@ -924,6 +979,9 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 		mhi_log(MHI_MSG_RAW,
			"Removing BB from head, chan %d\n", chan);
 		atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+		pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 		ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
					   NULL);
 		BUG_ON(ret_val);
@@ -1020,16 +1078,8 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 			ctxt_index_rp, ctxt_index_wp, chan);
 		BUG_ON(bb_index != ctxt_index_rp);
 	} else {
-		/* Hardware Channel, no client registerered,
-		   drop data */
-		recycle_trb_and_ring(mhi_dev_ctxt,
-			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
-			MHI_RING_TYPE_XFER_RING,
-			chan);
 		BUG();
-		/* No bounce buffer to recycle as no user request
-		 * can be present.
-		 */
+
 		}
 	}
 	return 0;
@@ -1122,6 +1172,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
 		mhi_log(MHI_MSG_CRITICAL,
			"Failed to get nr available trbs ret: %d.\n",
			ret_val);
+		panic("critical error");
 		return ret_val;
 	}
 	do {
@@ -1170,28 +1221,25 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
 		u64 db_value = 0;
 		unsigned long flags;
 
-		mhi_dev_ctxt->flags.uldl_enabled = 1;
 		chan = MHI_EV_READ_CHID(EV_CHID, event);
 		chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 		mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
-		spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
+		spin_lock_irqsave(&chan_ctxt->ring_lock, flags);
 		chan_ctxt->db_mode.db_mode = 1;
 		if (chan_ctxt->wp != chan_ctxt->rp) {
 			db_value = mhi_v2p_addr(mhi_dev_ctxt,
						MHI_RING_TYPE_XFER_RING, chan,
						(uintptr_t) chan_ctxt->wp);
-			mhi_process_db(mhi_dev_ctxt,
+			chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_info.chan_db_addr,
				chan, db_value);
 		}
 		client_handle = mhi_dev_ctxt->client_handle_list[chan];
 		if (client_handle)
 			result->transaction_status = -ENOTCONN;
-		spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+		spin_unlock_irqrestore(&chan_ctxt->ring_lock, flags);
 		break;
 	}
 	case MHI_EVENT_CC_BAD_TRE:
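
The db_mode.process_db indirection introduced above lets each ring bind its doorbell policy (burst mode vs. always-write) once at configuration time instead of branching on every write. A minimal sketch of that dispatch with hypothetical names (writel_relaxed is the real MMIO accessor; the structs mirror struct db_mode only loosely):

#include <linux/io.h>
#include <linux/types.h>

struct ring;	/* forward declaration for the callback signature */

/* Hypothetical mirror of struct db_mode: policy is bound once, then called. */
struct db_policy {
	u32 db_mode : 1;
	void (*process_db)(struct ring *r, void __iomem *db_addr, u32 val);
};

struct ring {
	struct db_policy db;
	void __iomem *db_addr;
};

/* Burst mode: only ring the doorbell while the ring is in DB (OOB) mode. */
static void process_db_brst(struct ring *r, void __iomem *db_addr, u32 val)
{
	if (r->db.db_mode) {
		writel_relaxed(val, db_addr);
		r->db.db_mode = 0;	/* re-armed by the next DB_MODE/OOB event */
	}
}

/* Burst mode disabled: every update rings the doorbell. */
static void process_db_always(struct ring *r, void __iomem *db_addr, u32 val)
{
	writel_relaxed(val, db_addr);
}

static void ring_db(struct ring *r, u32 val)
{
	r->db.process_db(r, r->db_addr, val);	/* no per-write branching */
}

mhi_process_db_brstmode() and mhi_process_db_brstmode_disable(), added later in this patch, are the two concrete policies bound per ring.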
@@ -1224,6 +1272,10 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
 	u64 db_value = 0;
 	void *removed_element = NULL;
 	void *added_element = NULL;
+	spinlock_t *lock;
+	unsigned long flags;
+	struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
		mhi_local_event_ctxt[ring_index];
 
 	ret_val = ctxt_del_element(ring, &removed_element);
 
@@ -1232,102 +1284,27 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
 		return ret_val;
 	}
 	ret_val = ctxt_add_element(ring, &added_element);
-	if (0 != ret_val)
+	if (ret_val) {
 		mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
-	db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
				(uintptr_t) ring->wp);
-	if (0 != ret_val)
 		return ret_val;
-	if (MHI_RING_TYPE_XFER_RING == ring_type) {
-		union mhi_xfer_pkt *removed_xfer_pkt =
-			(union mhi_xfer_pkt *)removed_element;
-		union mhi_xfer_pkt *added_xfer_pkt =
-			(union mhi_xfer_pkt *)added_element;
-		added_xfer_pkt->data_tx_pkt =
-				*(struct mhi_tx_pkt *)removed_xfer_pkt;
-	} else if (MHI_RING_TYPE_EVENT_RING == ring_type) {
-
-		spinlock_t *lock;
-		unsigned long flags;
-
-		if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
-			return -ERANGE;
-		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
-		spin_lock_irqsave(lock, flags);
-		db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
					(uintptr_t) ring->wp);
-		mhi_log(MHI_MSG_INFO,
-			"Updating ctxt, ring index %d\n", ring_index);
-		mhi_update_ctxt(mhi_dev_ctxt,
-				mhi_dev_ctxt->mmio_info.event_db_addr,
-				ring_index, db_value);
-		mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-		mhi_dev_ctxt->counters.ev_counter[ring_index]++;
-		spin_unlock_irqrestore(lock, flags);
 	}
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-	/* Asserting Device Wake here, will imediately wake mdm */
-	if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
-	     MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
-	     mhi_dev_ctxt->flags.link_up) {
-		switch (ring_type) {
-		case MHI_RING_TYPE_CMD_RING:
-		{
-			struct mutex *cmd_mutex = NULL;
-			cmd_mutex =
-				&mhi_dev_ctxt->
-				mhi_cmd_mutex_list[PRIMARY_CMD_RING];
-			mutex_lock(cmd_mutex);
-			mhi_dev_ctxt->cmd_ring_order = 1;
-			mhi_process_db(mhi_dev_ctxt,
				       mhi_dev_ctxt->mmio_info.cmd_db_addr,
				       ring_index, db_value);
-			mutex_unlock(cmd_mutex);
-			break;
-		}
-		case MHI_RING_TYPE_EVENT_RING:
-		{
-			spinlock_t *lock = NULL;
-			unsigned long flags = 0;
+	if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+		return -EACCES;
 
-			lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
-			spin_lock_irqsave(lock, flags);
-			mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-			if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
						MHI_EV_DB_INTERVAL) == 0) {
-				db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type,
							ring_index,
							(uintptr_t) ring->wp);
-				mhi_process_db(mhi_dev_ctxt,
					mhi_dev_ctxt->mmio_info.event_db_addr,
					ring_index, db_value);
-			}
-			spin_unlock_irqrestore(lock, flags);
-			break;
-		}
-		case MHI_RING_TYPE_XFER_RING:
-		{
-			unsigned long flags = 0;
+	lock = &mhi_ring->ring_lock;
+	spin_lock_irqsave(lock, flags);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt,
				ring_type,
				ring_index,
				(uintptr_t) ring->wp);
+	mhi_ring->db_mode.process_db(mhi_dev_ctxt,
				     mhi_dev_ctxt->mmio_info.event_db_addr,
				     ring_index, db_value);
+	spin_unlock_irqrestore(lock, flags);
+
+	return 0;
 
-			spin_lock_irqsave(
				&mhi_dev_ctxt->db_write_lock[ring_index],
				flags);
-			mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
-			mhi_process_db(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_info.chan_db_addr,
				ring_index, db_value);
-			spin_unlock_irqrestore(
				&mhi_dev_ctxt->db_write_lock[ring_index],
				flags);
-			break;
-		}
-		default:
-			mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
-		}
-	}
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-	return ret_val;
 }
 
 static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1338,13 +1315,11 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	struct mhi_ring *local_chan_ctxt;
 	struct mhi_chan_ctxt *chan_ctxt;
 	struct mhi_client_handle *client_handle = NULL;
-	struct mutex *chan_mutex;
-	int pending_el = 0;
+	int pending_el = 0, i;
 	struct mhi_ring *bb_ctxt;
 
 	MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
 	if (!VALID_CHAN_NR(chan)) {
 		mhi_log(MHI_MSG_ERROR,
			"Bad channel number for CCE\n");
@@ -1352,8 +1327,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	}
 
 	bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
-	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
-	mutex_lock(chan_mutex);
 	client_handle = mhi_dev_ctxt->client_handle_list[chan];
 	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 	chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -1373,6 +1346,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 			chan, pending_el);
 	atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	for (i = 0; i < pending_el; i++)
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+	for (i = 0; i < pending_el; i++) {
+		pm_runtime_put_noidle(&mhi_dev_ctxt->
				      dev_info->pcie_device->dev);
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->
					  dev_info->pcie_device->dev);
+	}
 
 	/* Reset the local channel context */
 	local_chan_ctxt->rp = local_chan_ctxt->base;
@@ -1380,39 +1365,17 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	local_chan_ctxt->ack_rp = local_chan_ctxt->base;
 
 	/* Reset the mhi channel context */
-	chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+	chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
 	chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
 	chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
 
 	mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
 	reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
 
-	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
-	mutex_unlock(chan_mutex);
 	mhi_log(MHI_MSG_INFO, "Reset complete.\n");
-	if (NULL != client_handle)
-		complete(&client_handle->chan_reset_complete);
 
 	return ret_val;
 }
 
-static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
-			  union mhi_cmd_pkt *cmd_pkt)
-{
-	u32 chan;
-
-	MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-	if (!VALID_CHAN_NR(chan)) {
-		mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
-		return -EINVAL;
-	}
-	mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
-				MHI_CMD_NOT_PENDING;
-
-	mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
-	if (NULL != mhi_dev_ctxt->client_handle_list[chan])
-		complete(
-		&mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
-	return 0;
-}
 
 enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
			       union mhi_event_pkt *ev_pkt,
			       union mhi_cmd_pkt **cmd_pkt,
@@ -1431,68 +1394,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
 	return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt);
 }
 
-int parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
-		    union mhi_event_pkt *ev_pkt, u32 event_index)
-{
-	int ret_val = 0;
-	union mhi_cmd_pkt *cmd_pkt = NULL;
-	u32 event_code = 0;
-
-	event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index);
-	switch (event_code) {
-	case MHI_EVENT_CC_SUCCESS:
-	{
-		u32 chan = 0;
-
-		MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-		switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
-		mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n", chan,
-			MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
-		case MHI_PKT_TYPE_RESET_CHAN_CMD:
-			ret_val = reset_chan_cmd(mhi_dev_ctxt, cmd_pkt);
-			if (ret_val)
-				mhi_log(MHI_MSG_INFO,
-					"Failed to process reset cmd ret %d\n",
-					ret_val);
-			break;
-		case MHI_PKT_TYPE_STOP_CHAN_CMD:
-			if (ret_val) {
-				mhi_log(MHI_MSG_INFO,
-					"Failed to set chan state\n");
-				return ret_val;
-			}
-			break;
-		case MHI_PKT_TYPE_START_CHAN_CMD:
-			ret_val = start_chan_cmd(mhi_dev_ctxt, cmd_pkt);
-			if (ret_val)
-				mhi_log(MHI_MSG_INFO,
-					"Failed to process reset cmd\n");
-			break;
-		default:
-			mhi_log(MHI_MSG_INFO,
-				"Bad cmd type 0x%x\n",
-				MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
-			break;
-		}
-		mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
-		atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
-		break;
-	}
-	default:
-		mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n");
-		break;
-	}
-	ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL);
-	return 0;
-}
-
 int mhi_poll_inbound(struct mhi_client_handle *client_handle,
		     struct mhi_result *result)
 {
 	struct mhi_tx_pkt *pending_trb = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
 	struct mhi_ring *local_chan_ctxt = NULL;
-	struct mutex *chan_mutex = NULL;
+	struct mhi_chan_cfg *cfg;
 	struct mhi_ring *bb_ctxt = NULL;
 	struct mhi_buf_info *bb = NULL;
 	int chan = 0, r = 0;
@@ -1505,10 +1413,10 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
 	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
 	chan = client_handle->chan_info.chan_nr;
 	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
-	chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+	cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
 	bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
 
-	mutex_lock(chan_mutex);
+	mutex_lock(&cfg->chan_lock);
 	if (bb_ctxt->rp != bb_ctxt->ack_rp) {
 		pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
 		result->flags = pending_trb->info;
@@ -1534,7 +1442,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
 		result->bytes_xferd = 0;
 		r = -ENODATA;
 	}
-	mutex_unlock(chan_mutex);
+	mutex_unlock(&cfg->chan_lock);
 	mhi_log(MHI_MSG_VERBOSE,
 		"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
 		result->buf_addr, result->bytes_xferd, chan);
@@ -1591,49 +1499,80 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
 	return MHI_EPID;
 }
 
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+/*
+ * mhi_assert_device_wake - Set WAKE_DB register
+ * force_set - if true, will set bit regardless of counts
+ */
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
			    bool force_set)
 {
-	if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
-	    (mhi_dev_ctxt->flags.link_up)) {
-		mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
			mhi_dev_ctxt->enable_lpm);
-		atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
+	unsigned long flags;
+
+	if (unlikely(force_set)) {
+		spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+		atomic_inc(&mhi_dev_ctxt->counters.device_wake);
+		mhi_write_db(mhi_dev_ctxt,
			     mhi_dev_ctxt->mmio_info.chan_db_addr,
			     MHI_DEV_WAKE_DB, 1);
+		spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
+	} else {
+		if (likely(atomic_add_unless(&mhi_dev_ctxt->
					     counters.device_wake,
					     1,
					     0)))
+			return;
+
+		spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+		if ((atomic_inc_return(&mhi_dev_ctxt->counters.device_wake)
		     == 1) &&
		    MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
 			mhi_write_db(mhi_dev_ctxt,
				     mhi_dev_ctxt->mmio_info.chan_db_addr,
-				     MHI_DEV_WAKE_DB, 1);
-		mhi_dev_ctxt->device_wake_asserted = 1;
-	} else {
-		mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
+				     MHI_DEV_WAKE_DB,
+				     1);
+		}
+		spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
 	}
-	return 0;
 }
 
-inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-	if ((mhi_dev_ctxt->enable_lpm) &&
-	    (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
-	    (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
-	    (mhi_dev_ctxt->flags.link_up)) {
-		mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
-
-		atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
-		mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
			     MHI_DEV_WAKE_DB, 0);
-		mhi_dev_ctxt->device_wake_asserted = 0;
-	} else {
-		mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
-			mhi_dev_ctxt->enable_lpm,
-			atomic_read(&mhi_dev_ctxt->flags.device_wake),
-			mhi_dev_ctxt->flags.link_up);
-	}
-	return 0;
+	unsigned long flags;
+
+	WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0);
+
+	if (likely(atomic_add_unless
		   (&mhi_dev_ctxt->counters.device_wake, -1, 1)))
+		return;
+
+	spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+	if ((atomic_dec_return(&mhi_dev_ctxt->counters.device_wake) == 0) &&
	    MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+		mhi_write_db(mhi_dev_ctxt,
			     mhi_dev_ctxt->mmio_info.chan_db_addr,
			     MHI_DEV_WAKE_DB,
			     0);
+	spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
 }
 
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm)
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
 {
-	mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm);
-	client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 1 : 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+	unsigned long flags;
+
+	read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+	/* Disable low power mode by asserting Wake */
+	if (enable_lpm == false)
+		mhi_assert_device_wake(mhi_dev_ctxt, false);
+	else
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+
+	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
 	return 0;
 }
+EXPORT_SYMBOL(mhi_set_lpm);
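
The rewritten assert/deassert pair above is a counted vote: only the 0 to 1 and 1 to 0 transitions take the spinlock and touch WAKE_DB; every other voter goes through the lock-free atomic_add_unless() fast path. The same shape in isolation (hypothetical struct and callbacks; the atomic and spinlock APIs are the real ones):

#include <linux/atomic.h>
#include <linux/spinlock.h>

struct wake_vote {
	atomic_t count;
	spinlock_t lock;	/* serializes the 0 <-> 1 edge transitions */
};

static void vote_get(struct wake_vote *w, void (*assert_wake)(void))
{
	unsigned long flags;

	/* Fast path: succeeds only if count is already non-zero (no HW write). */
	if (atomic_add_unless(&w->count, 1, 0))
		return;

	spin_lock_irqsave(&w->lock, flags);
	if (atomic_inc_return(&w->count) == 1)
		assert_wake();	/* first voter: ring WAKE_DB = 1 */
	spin_unlock_irqrestore(&w->lock, flags);
}

static void vote_put(struct wake_vote *w, void (*deassert_wake)(void))
{
	unsigned long flags;

	/* Fast path: decrement unless this would be the last vote. */
	if (atomic_add_unless(&w->count, -1, 1))
		return;

	spin_lock_irqsave(&w->lock, flags);
	if (atomic_dec_return(&w->count) == 0)
		deassert_wake();	/* last voter: ring WAKE_DB = 0 */
	spin_unlock_irqrestore(&w->lock, flags);
}

The force_set variant bypasses the fast path entirely so an M2 exit can ring the doorbell even while other votes are outstanding.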
 
 int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
			int index)
@@ -1658,11 +1597,56 @@ int mhi_deregister_channel(struct mhi_client_handle
 }
 EXPORT_SYMBOL(mhi_deregister_channel);
 
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+			     void __iomem *io_addr,
+			     uintptr_t chan,
+			     u32 val)
+{
+	struct mhi_ring *ring_ctxt =
+		&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+	if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr)
+		ring_ctxt = &mhi_dev_ctxt->
			mhi_local_chan_ctxt[chan];
+	else
+		ring_ctxt = &mhi_dev_ctxt->
			mhi_local_event_ctxt[chan];
+
+	mhi_log(MHI_MSG_VERBOSE,
+		"db.set addr: %p io_offset 0x%lx val:0x%x\n",
+		io_addr, chan, val);
+
+	mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+
+	if (ring_ctxt->db_mode.db_mode) {
+		mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+		ring_ctxt->db_mode.db_mode = 0;
+	} else {
+		mhi_log(MHI_MSG_INFO,
+			"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+			chan,
+			ring_ctxt->db_mode.brstmode,
+			ring_ctxt->db_mode.db_mode);
+	}
+}
+
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
				     void __iomem *io_addr,
				     uintptr_t chan,
				     u32 val)
+{
+	mhi_log(MHI_MSG_VERBOSE,
+		"db.set addr: %p io_offset 0x%lx val:0x%x\n",
+		io_addr, chan, val);
+
+	mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+	mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+}
+
 void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
		    void __iomem *io_addr,
		    uintptr_t chan, u32 val)
 {
-	struct mhi_ring *chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 
 	mhi_log(MHI_MSG_VERBOSE,
 		"db.set addr: %p io_offset 0x%lx val:0x%x\n",
@@ -1672,22 +1656,25 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 
 	/* Channel Doorbell and Polling Mode Disabled or Software Channel*/
 	if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
+		struct mhi_ring *chan_ctxt =
+			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 		if (!(IS_HARDWARE_CHANNEL(chan) &&
-		      mhi_dev_ctxt->flags.uldl_enabled &&
 		      !chan_ctxt->db_mode.db_mode)) {
 			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
 			chan_ctxt->db_mode.db_mode = 0;
 		} else {
 			mhi_log(MHI_MSG_INFO,
-				"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
-				chan, mhi_dev_ctxt->flags.uldl_enabled,
+				"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+				chan,
				chan_ctxt->db_mode.brstmode,
 				chan_ctxt->db_mode.db_mode);
 		}
 	/* Event Doorbell and Polling mode Disabled */
 	} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
-		/* Only ring for software channel */
-		if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
-		    !mhi_dev_ctxt->flags.uldl_enabled) {
+		struct mhi_ring *ev_ctxt =
+			&mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+		/* Only ring for software channel or db mode */
+		if (!(IS_HW_EV_RING(mhi_dev_ctxt, chan) &&
+		      !ev_ctxt->db_mode.db_mode)) {
 			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
 		}
 	} else {
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index 6f56587a172b..b4447378683e 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -9,6 +9,19 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include "mhi_sys.h"
 #include "mhi_hwio.h"
 #include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	u32 pcie_word_val = 0;
 	u32 expiry_counter;
+	unsigned long flags;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
 	mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+	read_lock_irqsave(pm_xfer_lock, flags);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_irqrestore(pm_xfer_lock, flags);
+		return -EIO;
+	}
 	pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
-				     MHISTATUS);
+				     MHISTATUS);
 	MHI_READ_FIELD(pcie_word_val,
			MHICTRL_RESET_MASK,
			MHICTRL_RESET_SHIFT);
+	read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
 	if (pcie_word_val == 0xFFFFFFFF)
 		return -ENOTCONN;
+
 	while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
 		expiry_counter++;
 		mhi_log(MHI_MSG_ERROR,
 			"Device is not RESET, sleeping and retrying.\n");
 		msleep(MHI_READY_STATUS_TIMEOUT_MS);
+		read_lock_irqsave(pm_xfer_lock, flags);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_irqrestore(pm_xfer_lock, flags);
+			return -EIO;
+		}
 		pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
					     MHICTRL);
 		MHI_READ_FIELD(pcie_word_val,
			       MHICTRL_RESET_MASK,
			       MHICTRL_RESET_SHIFT);
+		read_unlock_irqrestore(pm_xfer_lock, flags);
 	}
 
 	if (MHI_STATE_READY != pcie_word_val)
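
Every poll above repeats the same guard: take the read side of pm_xfer_lock, check MHI_REG_ACCESS_VALID(), read, unlock. Factored out, the shape is roughly the following (a sketch; the struct is hypothetical and the validity macro mirrors the one defined in mhi.h):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical container for the PM state + lock used by the polls above. */
struct pm_gate {
	rwlock_t lock;		/* read side: register access; write side: state change */
	u32 pm_state;
};

/* Mirrors MHI_REG_ACCESS_VALID(): anything between DISABLE and M3_EXIT. */
#define REG_ACCESS_VALID(s)	((s) > 0x0 && (s) < 0x80)

/* Read a register only while register access is legal; -EIO otherwise. */
static int gated_reg_read(struct pm_gate *g, void __iomem *addr, u32 *out)
{
	unsigned long flags;

	read_lock_irqsave(&g->lock, flags);
	if (!REG_ACCESS_VALID(g->pm_state)) {
		read_unlock_irqrestore(&g->lock, flags);
		return -EIO;	/* link may be off; a read would return garbage */
	}
	*out = readl_relaxed(addr);
	read_unlock_irqrestore(&g->lock, flags);
	return 0;
}

Holding only the read lock lets many MMIO users proceed concurrently while still excluding the PM-state writers that turn the link off.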
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	u32 pcie_word_val = 0;
 	u32 expiry_counter;
+	unsigned long flags;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
 	mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
 
+	read_lock_irqsave(pm_xfer_lock, flags);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_irqrestore(pm_xfer_lock, flags);
+		return -EIO;
+	}
 	/* Read MMIO and poll for READY bit to be set */
 	pcie_word_val = mhi_reg_read(
			mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
 	MHI_READ_FIELD(pcie_word_val,
			MHISTATUS_READY_MASK,
			MHISTATUS_READY_SHIFT);
+	read_unlock_irqrestore(pm_xfer_lock, flags);
 	if (pcie_word_val == 0xFFFFFFFF)
 		return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
 		mhi_log(MHI_MSG_ERROR,
 			"Device is not ready, sleeping and retrying.\n");
 		msleep(MHI_READY_STATUS_TIMEOUT_MS);
+		read_lock_irqsave(pm_xfer_lock, flags);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_irqrestore(pm_xfer_lock, flags);
+			return -EIO;
+		}
 		pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
					     MHISTATUS);
 		MHI_READ_FIELD(pcie_word_val,
			       MHISTATUS_READY_MASK,
			       MHISTATUS_READY_SHIFT);
+		read_unlock_irqrestore(pm_xfer_lock, flags);
 	}
 
 	if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
 	mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
				mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
 	if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
-		mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
-			mhi_dev_ctxt->dev_props->mhi_ver);
-		if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
-			ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
-		if (ret_val)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Bad MMIO version, 0x%x\n",
+			mhi_dev_ctxt->dev_props->mhi_ver);
 			return ret_val;
 	}
+
 	/* Enable the channels */
 	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
 		struct mhi_chan_ctxt *chan_ctxt =
			&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
 		if (VALID_CHAN_NR(i))
-			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+			chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
 		else
-			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+			chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
 	}
 	mhi_log(MHI_MSG_INFO,
 		"Read back MMIO Ready bit successfully. Moving on..\n");
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index d62d4189d3f3..2f44601e225e 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -22,16 +22,12 @@
 #include "mhi_hwio.h"
 
 /* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
 static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
 
 /* Read only sysfs attributes */
 
 static struct attribute *mhi_attributes[] = {
-	&dev_attr_MHI_M3.attr,
 	&dev_attr_MHI_M0.attr,
-	&dev_attr_MHI_RESET.attr,
 	NULL,
 };
 
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
 int mhi_pci_suspend(struct device *dev)
 {
 	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	if (NULL == mhi_dev_ctxt)
-		return -EINVAL;
-	mhi_log(MHI_MSG_INFO, "Entered, MHI state %s\n",
		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+	mhi_log(MHI_MSG_INFO, "Entered\n");
 
-	r = mhi_initiate_m3(mhi_dev_ctxt);
+	/* if rpm status still active then force suspend */
+	if (!pm_runtime_status_suspended(dev)) {
+		r = mhi_runtime_suspend(dev);
+		if (r)
+			return r;
+	}
 
-	if (!r)
-		return r;
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
 
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
-	mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+	mhi_log(MHI_MSG_INFO, "Exit\n");
 	return r;
 }
 
@@ -65,62 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
 	int r = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	r = mhi_initiate_m3(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 
-	pm_runtime_mark_last_busy(dev);
+	mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
		mhi_dev_ctxt->mhi_pm_state,
		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+	/* Link is already disabled */
+	if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+	    mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+		mhi_log(MHI_MSG_INFO, "Already in suspended state, exiting\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return 0;
+	}
+
+	if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+		mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return -EBUSY;
+	}
+
+	mhi_assert_device_wake(mhi_dev_ctxt, false);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
			       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			       msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to get M0||M1 event, timeout, current state:%s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+		r = -EIO;
+		goto rpm_suspend_exit;
+	}
+
+	mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_log(MHI_MSG_INFO,
+		"Waiting for M3 completion.\n");
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
			       mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
			       msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to get M3 event, timeout, current state:%s\n",
+			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+		r = -EIO;
+		goto rpm_suspend_exit;
+	}
+
+	r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+	if (r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to Turn off link ret:%d\n", r);
+	}
+
+rpm_suspend_exit:
 	mhi_log(MHI_MSG_INFO, "Exited\n");
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
 	return r;
 }
 
+int mhi_runtime_idle(struct device *dev)
+{
+	mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+	/*
+	 * During runtime resume the RPM framework always calls the
+	 * rpm_idle callback once dev.power usage_count drops to 0,
+	 * to ask whether the device is ready to suspend.  If the
+	 * callback returns 0 (or is not defined), the framework
+	 * assumes the driver is ready and schedules runtime suspend.
+	 * In MHI power management, the host must enter runtime
+	 * suspend only after reaching MHI State M2, even when the
+	 * usage count is 0.  Return -EBUSY to disable automatic suspend.
+	 */
+	return -EBUSY;
+}
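
In other words, idle-triggered autosuspend is vetoed outright; suspend is only ever scheduled explicitly, from the M1 to M2 worker via pm_request_autosuspend(). A driver wanting the same behavior wires its callbacks roughly like this (a sketch; the my_* names are placeholders, SET_RUNTIME_PM_OPS is the real kernel macro):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* enter M3 and turn the link off here */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* turn the link on and return to M0 here */
	return 0;
}

/* Veto rpm_idle so usage_count reaching 0 never auto-schedules suspend. */
static int my_runtime_idle(struct device *dev)
{
	return -EBUSY;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend,
			   my_runtime_resume,
			   my_runtime_idle)
};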
+
 int mhi_runtime_resume(struct device *dev)
 {
 	int r = 0;
 	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	r = mhi_initiate_m0(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
-	pm_runtime_mark_last_busy(dev);
-	mhi_log(MHI_MSG_INFO, "Exited\n");
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+	/* turn on link */
+	r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+	if (r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to resume link\n");
+		goto rpm_resume_exit;
+	}
+
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+	/* Set and wait for M0 Event */
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
			       mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			       mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			       msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+	if (!r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to get M0 event, timeout\n");
+		r = -EIO;
+		goto rpm_resume_exit;
+	}
+	r = 0; /* no errors */
+
+rpm_resume_exit:
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
+	mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
 	return r;
 }
 
 int mhi_pci_resume(struct device *dev)
 {
 	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
 
-	r = mhi_initiate_m0(mhi_dev_ctxt);
-	if (r)
-		goto exit;
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
-	switch (r) {
-	case 0:
+	r = mhi_runtime_resume(dev);
+	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
-			"Timeout: No M0 event after %d ms\n",
-			MHI_MAX_SUSPEND_TIMEOUT);
-		mhi_dev_ctxt->counters.m0_event_timeouts++;
-		r = -ETIME;
-		break;
-	case -ERESTARTSYS:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Going Down...\n");
-		break;
-	default:
-		mhi_log(MHI_MSG_INFO,
-			"Wait complete state: %s\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
-		r = 0;
+			"Failed to resume link\n");
+	} else {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
 	}
-exit:
-	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+
 	return r;
 }
 
@@ -134,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
 	return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
 }
 
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
-		      const char *buf, size_t count)
-{
-	int r = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-		&mhi_devices.device_list[0].mhi_ctxt;
-
-	r = mhi_initiate_m3(mhi_dev_ctxt);
-	if (r) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to suspend %d\n", r);
-		return r;
-	}
-	r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to turn off link ret %d\n", r);
-
-	return count;
-}
-
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
-			     const char *buf, size_t count)
-{
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-		&mhi_devices.device_list[0].mhi_ctxt;
-	int r = 0;
-
-	mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
-	r = mhi_trigger_reset(mhi_dev_ctxt);
-	if (r != 0)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to trigger MHI RESET ret %d\n",
-			r);
-	else
-		mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
-	return count;
-}
 
 ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
 {
 	struct mhi_device_ctxt *mhi_dev_ctxt =
		&mhi_devices.device_list[0].mhi_ctxt;
 
-	if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to resume link\n");
-		return count;
-	}
-	mhi_initiate_m0(mhi_dev_ctxt);
-	mhi_log(MHI_MSG_CRITICAL,
-		"Current mhi_state = %s\n",
-		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 
 	return count;
 }
@@ -195,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
 	mhi_log(MHI_MSG_INFO, "Entered...\n");
 	pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
-	mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+
 	if (0 == mhi_dev_ctxt->flags.link_up) {
 		mhi_log(MHI_MSG_CRITICAL,
 			"Link already marked as down, nothing to do\n");
 		goto exit;
 	}
-	/* Disable shadow to avoid restoring D3 hot struct device */
-	r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to stop shadow config space: %d\n", r);
 
-	r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+	r = pci_save_state(pcie_dev);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to set pcie power state to D3 hotret: %x\n", r);
-		goto exit;
+			"Failed to save pcie state ret: %d\n",
+			r);
 	}
+	mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+	pci_disable_device(pcie_dev);
+	r = pci_set_power_state(pcie_dev, PCI_D3hot);
+	if (r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to set pcie power state to D3 hot ret: %d\n",
+			r);
+	}
+
 	r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
-			mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-			mhi_dev_ctxt->dev_info->pcie_device,
+			pcie_dev->bus->number,
+			pcie_dev,
 			NULL, 0);
 	if (r)
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to suspend pcie bus ret 0x%x\n", r);
+			"Failed to suspend pcie bus ret 0x%x\n", r);
+
+	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+	if (r)
+		mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
 	mhi_dev_ctxt->flags.link_up = 0;
 exit:
-	mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Exited...\n");
 	return 0;
 }
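
The link-down path above follows the standard PCI suspend recipe: save config space, detach the saved copy, disable the device, drop to D3hot, then suspend the root-port link. Condensed into a sketch (error handling elided; bus_suspend() is a placeholder for the msm_pcie_pm_control() call, the pci_* calls are the real API):

#include <linux/pci.h>

/* Sketch of the D0 -> D3hot power-down order used by mhi_turn_off_pcie_link(). */
static struct pci_saved_state *link_off(struct pci_dev *pdev,
					int (*bus_suspend)(struct pci_dev *))
{
	struct pci_saved_state *state;

	pci_save_state(pdev);			/* snapshot config space */
	state = pci_store_saved_state(pdev);	/* detach it for safe keeping */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	bus_suspend(pdev);			/* platform root-port suspend */
	return state;				/* handed back on link-on */
}

On resume the order inverts, as the next hunk shows: bus resume, pci_enable_device(), pci_load_and_free_saved_state(), pci_restore_state(), then pci_set_master().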
@@ -235,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
 
 	pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
 
-	mutex_lock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Entered...\n");
 	if (mhi_dev_ctxt->flags.link_up)
 		goto exit;
+
+	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
+	if (r)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Could not set bus frequency ret: %d\n",
+			r);
+
 	r = msm_pcie_pm_control(MSM_PCIE_RESUME,
-			mhi_dev_ctxt->dev_info->pcie_device->bus->number,
-			mhi_dev_ctxt->dev_info->pcie_device,
-			NULL, 0);
+			pcie_dev->bus->number,
+			pcie_dev,
+			NULL,
+			0);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
 			"Failed to resume pcie bus ret %d\n", r);
 		goto exit;
 	}
-	r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
				PCI_D0);
-	if (r) {
+	r = pci_enable_device(pcie_dev);
+	if (r)
 		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to load stored state %d\n", r);
-		goto exit;
-	}
-	r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
-	if (r) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to Recover config space ret: %d\n", r);
-		goto exit;
-	}
+			"Failed to enable device ret:%d\n",
+			r);
+
+	pci_load_and_free_saved_state(pcie_dev,
				      &mhi_dev_ctxt->dev_props->pcie_state);
+	pci_restore_state(pcie_dev);
+	pci_set_master(pcie_dev);
+
 	mhi_dev_ctxt->flags.link_up = 1;
 exit:
-	mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
 	mhi_log(MHI_MSG_INFO, "Exited...\n");
 	return r;
 }
-
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 8ee3deddbb95..defd6f4fd137 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include
 #include
 #include
 #include
@@ -23,24 +24,11 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
				unsigned long action,
				void *data)
 {
-	int ret_val = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt =
-				&mhi_devices.device_list[0].mhi_ctxt;
-	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
-
-	mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
-	if (NULL != mhi_dev_ctxt)
-		mhi_dev_ctxt->esoc_notif = action;
 	switch (action) {
 	case SUBSYS_BEFORE_POWERUP:
 		mhi_log(MHI_MSG_INFO,
 			"Received Subsystem event BEFORE_POWERUP\n");
-		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
-		ret_val = init_mhi_base_state(mhi_dev_ctxt);
-		if (0 != ret_val)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to transition to base state %d.\n",
-				ret_val);
 		break;
 	case SUBSYS_AFTER_POWERUP:
 		mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
 	}
 }
 
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 {
 	u32 pcie_word_val = 0;
 	int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 	mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
 	pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
				     BHI_EXECENV);
+	mhi_dev_ctxt->dev_exec_env = pcie_word_val;
 	if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
 		mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
 	} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
 		mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
-		r = bhi_probe(mhi_pcie_dev);
-		if (r)
-			mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
 	} else {
 		mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
			pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
 
 void mhi_link_state_cb(struct msm_pcie_notify *notify)
 {
-	int ret_val = 0;
+
 	struct mhi_pcie_dev_info *mhi_pcie_dev;
 	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
-	int r = 0;
 
 	if (NULL == notify || NULL == notify->data) {
 		mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
 	case MSM_PCIE_EVENT_LINKUP:
 		mhi_log(MHI_MSG_INFO,
 			"Received MSM_PCIE_EVENT_LINKUP\n");
-		if (0 == mhi_pcie_dev->link_up_cntr) {
-			mhi_log(MHI_MSG_INFO,
-				"Initializing MHI for the first time\n");
-			r = mhi_ctxt_init(mhi_pcie_dev);
-			if (r) {
-				mhi_log(MHI_MSG_ERROR,
-					"MHI initialization failed, ret %d.\n",
-					r);
-				r = msm_pcie_register_event(
					&mhi_pcie_dev->mhi_pci_link_event);
-				mhi_log(MHI_MSG_ERROR,
-					"Deregistered from PCIe notif r %d.\n",
-					r);
-				return;
-			}
-			mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
-			mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
-			pci_set_master(mhi_pcie_dev->pcie_device);
-			r = set_mhi_base_state(mhi_pcie_dev);
-			if (r)
-				return;
-			init_mhi_base_state(mhi_dev_ctxt);
-		} else {
-			mhi_log(MHI_MSG_INFO,
-				"Received Link Up Callback\n");
-		}
 		mhi_pcie_dev->link_up_cntr++;
 		break;
 	case MSM_PCIE_EVENT_WAKEUP:
 		mhi_log(MHI_MSG_INFO,
 			"Received MSM_PCIE_EVENT_WAKE\n");
 		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
 		__pm_relax(&mhi_dev_ctxt->w_lock);
-		if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
-			mhi_log(MHI_MSG_INFO,
-				"There is a pending resume, doing nothing.\n");
-			return;
-		}
-		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
-						    STATE_TRANSITION_WAKE);
-		if (0 != ret_val) {
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to init state transition, to %d\n",
-				STATE_TRANSITION_WAKE);
+
+		if (mhi_dev_ctxt->flags.mhi_initialized) {
+			pm_runtime_get(&mhi_dev_ctxt->
				       dev_info->pcie_device->dev);
+			pm_runtime_mark_last_busy(&mhi_dev_ctxt->
				       dev_info->pcie_device->dev);
+			pm_runtime_put_noidle(&mhi_dev_ctxt->
				       dev_info->pcie_device->dev);
 		}
 		break;
 	default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	int r = 0;
 
-	mhi_assert_device_wake(mhi_dev_ctxt);
-	mhi_dev_ctxt->flags.link_up = 1;
-	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
-	if (r)
-		mhi_log(MHI_MSG_INFO,
-			"Failed to scale bus request to active set.\n");
 	r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
 	if (r) {
 		mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index f0404da98cf7..1021a56d1b3d 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -17,7 +17,7 @@
 #include
 #include
 
-static const char *state_transition_str(enum STATE_TRANSITION state)
+const char *state_transition_str(enum STATE_TRANSITION state)
 {
 	static const char * const mhi_states_transition_str[] = {
 		"RESET",
@@ -40,7 +40,17 @@ static const char *state_transition_str(enum STATE_TRANSITION state)
 		mhi_states_transition_str[state] : "Invalid";
 }
 
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
				       MHISTATUS,
				       MHISTATUS_MHISTATE_MASK,
				       MHISTATUS_MHISTATE_SHIFT);
+
+	return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
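
mhi_get_m_state() above is a masked-field read clamped to a sentinel: any value outside the known state range collapses to MHI_STATE_LIMIT so callers can never index past the end of state tables. The generic shape (a sketch; readl_relaxed is the real accessor):

#include <linux/io.h>
#include <linux/types.h>

/* Read a bitfield from a 32-bit register and clamp unknown values. */
static u32 read_field_clamped(void __iomem *reg, u32 mask, u32 shift, u32 limit)
{
	u32 val = (readl_relaxed(reg) & mask) >> shift;

	return (val >= limit) ? limit : val;	/* 'limit' doubles as "invalid" */
}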
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
		     enum MHI_STATE new_state)
 {
 	if (MHI_STATE_RESET == new_state) {
@@ -64,20 +74,18 @@ static void conditional_chan_db_write(
 {
 	u64 db_value;
 	unsigned long flags;
+	struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 
-	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
-	if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
-		db_value =
-			mhi_v2p_addr(mhi_dev_ctxt,
				     MHI_RING_TYPE_XFER_RING, chan,
				     (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-		mhi_process_db(mhi_dev_ctxt,
			       mhi_dev_ctxt->mmio_info.chan_db_addr,
			       chan, db_value);
-	}
-	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
-	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+	spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt,
				MHI_RING_TYPE_XFER_RING,
				chan,
				(uintptr_t)mhi_ring->wp);
+	mhi_ring->db_mode.process_db(mhi_dev_ctxt,
				     mhi_dev_ctxt->mmio_info.chan_db_addr,
				     chan,
				     db_value);
+	spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
 }
 
 static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -103,29 +111,25 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
 static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-	struct mutex *cmd_mutex = NULL;
 	u64 db_value;
 	u64 rp = 0;
 	struct mhi_ring *local_ctxt = NULL;
 
 	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
-	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
-	mhi_dev_ctxt->cmd_ring_order = 0;
-	mutex_lock(cmd_mutex);
+
 	local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
 	rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
			  PRIMARY_CMD_RING, (uintptr_t)local_ctxt->rp);
-	db_value =
-		mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
			     PRIMARY_CMD_RING,
			     (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
-	if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
-		mhi_process_db(mhi_dev_ctxt,
			       mhi_dev_ctxt->mmio_info.cmd_db_addr,
			       0, db_value);
-	mhi_dev_ctxt->cmd_ring_order = 0;
-	mutex_unlock(cmd_mutex);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt,
				MHI_RING_TYPE_CMD_RING,
				PRIMARY_CMD_RING,
				(uintptr_t)local_ctxt->wp);
+	if (rp != db_value)
+		local_ctxt->db_mode.process_db(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_info.cmd_db_addr,
				0,
				db_value);
 }
 
 static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -133,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 	u32 i;
 	u64 db_value = 0;
 	struct mhi_event_ctxt *event_ctxt = NULL;
+	struct mhi_ring *mhi_ring;
 	spinlock_t *lock = NULL;
 	unsigned long flags;
 
 	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
-		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
-		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+		mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+		lock = &mhi_ring->ring_lock;
 		spin_lock_irqsave(lock, flags);
 		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
-		db_value =
-			mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
				     i,
				     (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
-		if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
-			mhi_process_db(mhi_dev_ctxt,
				       mhi_dev_ctxt->mmio_info.event_db_addr,
				       i, db_value);
-		}
-		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+		db_value = mhi_v2p_addr(mhi_dev_ctxt,
					MHI_RING_TYPE_EVENT_RING,
					i,
					(uintptr_t)mhi_ring->wp);
+		mhi_ring->db_mode.process_db(mhi_dev_ctxt,
				mhi_dev_ctxt->mmio_info.event_db_addr,
				i,
				db_value);
 		spin_unlock_irqrestore(lock, flags);
 	}
 }
 
@@ -159,169 +162,121 @@ static int process_m0_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	unsigned long flags;
-	int r = 0;
-
-	mhi_log(MHI_MSG_INFO, "Entered\n");
+	mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+	switch (mhi_dev_ctxt->mhi_state) {
+	case MHI_STATE_M2:
 		mhi_dev_ctxt->counters.m2_m0++;
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
-		mhi_dev_ctxt->counters.m3_m0++;
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
-		mhi_log(MHI_MSG_INFO,
-			"Transitioning from READY.\n");
-	} else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
-		mhi_log(MHI_MSG_INFO,
-			"Transitioning from M1.\n");
-	} else {
-		mhi_log(MHI_MSG_INFO,
-			"MHI State %s link state %d. Quitting\n",
-			TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
-			mhi_dev_ctxt->flags.link_up);
+		break;
+	case MHI_STATE_M3:
+		mhi_dev_ctxt->counters.m3_m0++;
+		break;
+	default:
+		break;
 	}
 
-	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-	mhi_assert_device_wake(mhi_dev_ctxt);
-	read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_assert_device_wake(mhi_dev_ctxt, true);
 	if (mhi_dev_ctxt->flags.mhi_initialized) {
 		ring_all_ev_dbs(mhi_dev_ctxt);
 		ring_all_chan_dbs(mhi_dev_ctxt, true);
 		ring_all_cmd_dbs(mhi_dev_ctxt);
 	}
-	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
-	r = mhi_set_bus_request(mhi_dev_ctxt, 1);
-	if (r)
-		mhi_log(MHI_MSG_CRITICAL,
-			"Could not set bus frequency ret: %d\n",
-			r);
-	mhi_dev_ctxt->flags.pending_M0 = 0;
-	if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
-		atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
-		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
-	}
-	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	if (!mhi_dev_ctxt->flags.pending_M3 &&
-	    mhi_dev_ctxt->flags.link_up &&
-	    mhi_dev_ctxt->flags.mhi_initialized)
-		mhi_deassert_device_wake(mhi_dev_ctxt);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+	mhi_deassert_device_wake(mhi_dev_ctxt);
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
 }
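
process_m0_transition() shows the locking discipline used throughout the rewritten state machine: state words are written only under write_lock_irq(&pm_xfer_lock), doorbells are rung under the read side, and sleepers are released with a plain wake_up() once the new state is visible. Reduced to its skeleton (a sketch with hypothetical names; assumes rwlock_init() and init_waitqueue_head() ran at setup):

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct pm_state_mach {
	rwlock_t lock;		/* pm_xfer_lock: writers change state */
	u32 state;
	wait_queue_head_t event;
};

static void publish_state(struct pm_state_mach *sm, u32 new_state)
{
	write_lock_irq(&sm->lock);
	sm->state = new_state;		/* visible to read-side fast paths */
	write_unlock_irq(&sm->lock);
	wake_up(&sm->event);		/* release wait_event_timeout() sleepers */
}

static int wait_for_state(struct pm_state_mach *sm, u32 want, long timeout_ms)
{
	long r = wait_event_timeout(sm->event, sm->state == want,
				    msecs_to_jiffies(timeout_ms));

	return r ? 0 : -EIO;	/* mirrors the suspend/resume timeout handling */
}

Using the non-interruptible wait_event_timeout() here, rather than the interruptible variant the old code used, removes the -ERESTARTSYS paths that the deleted mhi_initiate_m0()/mhi_initiate_m3() had to special-case.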
-static int process_m1_transition(
-		struct mhi_device_ctxt *mhi_dev_ctxt,
-		enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
 {
-	unsigned long flags = 0;
-	int r = 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+
+	mhi_dev_ctxt = container_of(work,
				    struct mhi_device_ctxt,
				    process_m1_worker);
+	mutex_lock(&mhi_dev_ctxt->pm_lock);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
 	mhi_log(MHI_MSG_INFO,
 		"Processing M1 state transition from state %s\n",
 		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	if (!mhi_dev_ctxt->flags.pending_M3) {
-		mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
-		atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
-		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
-		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
-		mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
-		mhi_dev_ctxt->counters.m1_m2++;
+	/* We either Entered M3 or we did M3->M0 Exit */
+	if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+		write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+		mutex_unlock(&mhi_dev_ctxt->pm_lock);
+		return;
 	}
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-	if (r)
-		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
 
-	mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+	mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+	mhi_dev_ctxt->counters.m1_m2++;
+	mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+	mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
 	msleep(MHI_M2_DEBOUNCE_TMR_MS);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
		atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-	if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
-	    mhi_dev_ctxt->flags.pending_M3) {
-		mhi_assert_device_wake(mhi_dev_ctxt);
-	} else {
-		pm_runtime_mark_last_busy(
				&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		r = pm_request_autosuspend(
				&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		if (r && r != -EAGAIN) {
-			mhi_log(MHI_MSG_ERROR,
-				"Failed to remove counter ret %d\n", r);
-			BUG_ON(mhi_dev_ctxt->dev_info->
				pcie_device->dev.power.runtime_error);
-		}
+	/* During DEBOUNCE Time We could be receiving M0 Event */
+	if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+		mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+		mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
 	}
-	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
-	mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 
-	return 0;
+	if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+		mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
			atomic_read(&mhi_dev_ctxt->counters.device_wake));
+		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+		mhi_assert_device_wake(mhi_dev_ctxt, true);
+		mhi_deassert_device_wake(mhi_dev_ctxt);
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+	} else {
+		mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->
				dev_info->pcie_device->dev);
+		pm_request_autosuspend(&mhi_dev_ctxt->
				dev_info->pcie_device->dev);
+	}
+	mutex_unlock(&mhi_dev_ctxt->pm_lock);
 }
 
 static int process_m3_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	unsigned long flags;
 	mhi_log(MHI_MSG_INFO,
-		"Processing M3 state transition\n");
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
-	mhi_dev_ctxt->flags.pending_M3 = 0;
-	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->counters.m0_m3++;
-	return 0;
-}
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
 
-static int mhi_process_link_down(
-		struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-	unsigned long flags;
-	int r;
-
-	mhi_log(MHI_MSG_INFO, "Entered.\n");
-	if (NULL == mhi_dev_ctxt)
-		return -EINVAL;
-
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
-	mhi_dev_ctxt->flags.mhi_initialized = 0;
-	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	mhi_deassert_device_wake(mhi_dev_ctxt);
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
-	mhi_dev_ctxt->flags.stop_threads = 1;
-
-	while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
-		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
-		mhi_log(MHI_MSG_INFO,
-			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
-			mhi_dev_ctxt->flags.st_thread_stopped,
-			mhi_dev_ctxt->flags.ev_thread_stopped);
-		msleep(20);
+	switch (mhi_dev_ctxt->mhi_state) {
+	case MHI_STATE_M1:
+		mhi_dev_ctxt->counters.m1_m3++;
+		break;
+	case MHI_STATE_M0:
+		mhi_dev_ctxt->counters.m0_m3++;
+		break;
+	default:
+		break;
 	}
 
-	r = mhi_set_bus_request(mhi_dev_ctxt, 0);
-	if (r)
-		mhi_log(MHI_MSG_INFO,
-			"Failed to scale bus request to sleep set.\n");
-	mhi_turn_off_pcie_link(mhi_dev_ctxt);
-	mhi_dev_ctxt->dev_info->link_down_cntr++;
-	atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
-
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+	mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
 	return 0;
 }
 
@@ -329,51 +284,20 @@ static int process_link_down_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	if (0 !=
-	    mhi_process_link_down(mhi_dev_ctxt)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Failed to process link down\n");
-	}
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
-	return 0;
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }
 
 static int process_wake_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-
-	mhi_log(MHI_MSG_INFO, "Entered\n");
-	__pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
-	if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
-		mhi_log(MHI_MSG_CRITICAL,
-			"Pending SSR, Ignoring.\n");
-		goto exit;
-	}
-	if (mhi_dev_ctxt->flags.mhi_initialized) {
-		r = pm_request_resume(
			&mhi_dev_ctxt->dev_info->pcie_device->dev);
-		mhi_log(MHI_MSG_VERBOSE,
-			"MHI is initialized, transitioning to M0, ret %d\n", r);
-	}
-
-	if (!mhi_dev_ctxt->flags.mhi_initialized) {
-		mhi_log(MHI_MSG_INFO,
-			"MHI is not initialized transitioning to base.\n");
-		r = init_mhi_base_state(mhi_dev_ctxt);
-		if (0 != r)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to transition to base state %d.\n",
-				r);
-	}
-
-exit:
-	__pm_relax(&mhi_dev_ctxt->w_lock);
-	mhi_log(MHI_MSG_INFO, "Exited.\n");
-	return r;
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }
 
 
@@ -381,9 +305,10 @@ static int process_bhi_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	mhi_turn_on_pcie_link(mhi_dev_ctxt);
 	mhi_log(MHI_MSG_INFO, "Entered\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return 0;
@@ -396,36 +321,42 @@ static int process_ready_transition(
 	int r = 0;
 
 	mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
-	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
 
 	r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-
-	if (r)
+	if (r) {
 		mhi_log(MHI_MSG_ERROR, "Failed to reset thread queues\n");
+		return r;
+	}
+
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
 
 	r = mhi_init_mmio(mhi_dev_ctxt);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	/* Initialize MMIO */
 	if (r) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failure during MMIO initialization\n");
 		return r;
 	}
+	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	r = mhi_add_elements_to_event_rings(mhi_dev_ctxt, cur_work_item);
-
+	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 	if (r) {
 		mhi_log(MHI_MSG_ERROR,
 			"Failure during event ring init\n");
 		return r;
 	}
+
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->flags.stop_threads = 0;
-	mhi_assert_device_wake(mhi_dev_ctxt);
 	mhi_reg_write_field(mhi_dev_ctxt,
			    mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
			    MHICTRL_MHISTATE_MASK,
			    MHICTRL_MHISTATE_SHIFT,
			    MHI_STATE_M0);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	return r;
 }
 
@@ -448,37 +379,22 @@ static int process_reset_transition(
 	enum STATE_TRANSITION cur_work_item)
 {
 	int r = 0, i = 0;
-	unsigned long flags = 0;
-
 	mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
-	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
 	mhi_dev_ctxt->counters.mhi_reset_cntr++;
-	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
 	r = mhi_test_for_device_reset(mhi_dev_ctxt);
 	if (r)
 		mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
 	r = mhi_test_for_device_ready(mhi_dev_ctxt);
-	switch (r) {
-	case 0:
-		break;
-	case -ENOTCONN:
-		mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
-		break;
-	case -ETIMEDOUT:
-		r = mhi_init_state_transition(mhi_dev_ctxt,
					      STATE_TRANSITION_RESET);
-		if (0 != r)
-			mhi_log(MHI_MSG_CRITICAL,
-				"Failed to initiate %s state trans\n",
-				state_transition_str(STATE_TRANSITION_RESET));
-		break;
-	default:
-		mhi_log(MHI_MSG_CRITICAL,
-			"Unexpected ret code detected for\n");
-		break;
+	if (r) {
+		mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
			r);
+		return r;
 	}
+
 	for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
 		mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
			mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
@@ -511,45 +427,10 @@ static int process_syserr_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-
-	mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
-	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-	r = mhi_init_state_transition(mhi_dev_ctxt,
				      STATE_TRANSITION_RESET);
-	if (r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to init state transition to RESET ret %d\n", r);
-		mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
-	}
-	return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
-	int r = 0;
-	int chan = client_handle->chan_info.chan_nr;
-
-	init_completion(&client_handle->chan_open_complete);
-	r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
			 MHI_COMMAND_START_CHAN,
			 chan);
-	if (r != 0) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to send start command for chan %d ret %d\n",
-			chan, r);
-		return r;
-	}
-	r = wait_for_completion_timeout(
			&client_handle->chan_open_complete,
			msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
-	if (!r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Timed out waiting for chan %d start completion\n",
-			chan);
-		r = -ETIME;
-	}
-	return 0;
+	mhi_log(MHI_MSG_INFO,
+		"Entered with State %s\n",
+		TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+	return -EIO;
 }
 
 static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -573,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
				   chan_info.flags))
 			mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
 	}
-	if (exec_env == MHI_EXEC_ENV_AMSS)
-		mhi_deassert_device_wake(mhi_dev_ctxt);
+
 	mhi_log(MHI_MSG_INFO, "Done.\n");
 }
 
@@ -582,36 +462,25 @@ static int process_sbl_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0;
-
-	pm_runtime_set_autosuspend_delay(
			&mhi_dev_ctxt->dev_info->pcie_device->dev,
			MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
-	pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	if (r) {
-		mhi_log(MHI_MSG_ERROR,
-			"Failed to activate runtime pm ret %d\n", r);
-	}
-	pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
-	mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+	mhi_log(MHI_MSG_INFO, "Enabled\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
-	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
 	return 0;
-
 }
 
 static int process_amss_transition(
 	struct mhi_device_ctxt *mhi_dev_ctxt,
 	enum STATE_TRANSITION cur_work_item)
 {
-	int r = 0, i = 0;
-	struct mhi_client_handle *client_handle = NULL;
+	int r = 0;
 
 	mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
 	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
-	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
-	mhi_assert_device_wake(mhi_dev_ctxt);
+	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
 	if (!mhi_dev_ctxt->flags.mhi_initialized) {
 		r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
					cur_work_item);
@@ -619,56 +488,42 @@ static int process_amss_transition(
 		if (r) {
 			mhi_log(MHI_MSG_CRITICAL,
 				"Failed to set local chan state ret %d\n", r);
+			mhi_deassert_device_wake(mhi_dev_ctxt);
 			return r;
 		}
+		read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 		ring_all_chan_dbs(mhi_dev_ctxt, true);
+		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 		mhi_log(MHI_MSG_INFO,
 			"Notifying clients that MHI is enabled\n");
 		enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
 	} else {
 		mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
-		for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
-			client_handle = mhi_dev_ctxt->client_handle_list[i];
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - int r = 0; - unsigned long flags = 0; - - mhi_log(MHI_MSG_INFO, "Entered\n"); - write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); - mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR; - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); - - mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n"); - mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET); - mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n"); - r = mhi_init_state_transition(mhi_dev_ctxt, - STATE_TRANSITION_RESET); - if (0 != r) - mhi_log(MHI_MSG_CRITICAL, - "Failed to initiate %s state trans ret %d\n", - state_transition_str(STATE_TRANSITION_RESET), r); - mhi_log(MHI_MSG_INFO, "Exiting\n"); - return r; -} - static int process_stt_work_item( struct mhi_device_ctxt *mhi_dev_ctxt, enum STATE_TRANSITION cur_work_item) @@ -697,9 +552,6 @@ static int process_stt_work_item( case STATE_TRANSITION_M0: r = process_m0_transition(mhi_dev_ctxt, cur_work_item); break; - case STATE_TRANSITION_M1: - r = process_m1_transition(mhi_dev_ctxt, cur_work_item); - break; case STATE_TRANSITION_M3: r = process_m3_transition(mhi_dev_ctxt, cur_work_item); break; @@ -799,227 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt, wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event); return r; } - -int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - int r = 0; - unsigned long flags; - - mhi_log(MHI_MSG_INFO, - "Entered MHI state %s, Pending M0 %d Pending M3 %d\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - mhi_dev_ctxt->flags.pending_M0, - mhi_dev_ctxt->flags.pending_M3); - mutex_lock(&mhi_dev_ctxt->pm_lock); - mhi_log(MHI_MSG_INFO, - "Waiting for M0 M1 or M3. 
Currently %s...\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state)); - - r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event, - mhi_dev_ctxt->mhi_state == MHI_STATE_M3 || - mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || - mhi_dev_ctxt->mhi_state == MHI_STATE_M1, - msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT)); - switch (r) { - case 0: - mhi_log(MHI_MSG_CRITICAL, - "Timeout: State %s after %d ms\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - MHI_MAX_SUSPEND_TIMEOUT); - mhi_dev_ctxt->counters.m0_event_timeouts++; - r = -ETIME; - goto exit; - case -ERESTARTSYS: - mhi_log(MHI_MSG_CRITICAL, - "Going Down...\n"); - goto exit; - default: - mhi_log(MHI_MSG_INFO, - "Wait complete state: %s\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state)); - r = 0; - break; - } - if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || - mhi_dev_ctxt->mhi_state == MHI_STATE_M1) { - mhi_assert_device_wake(mhi_dev_ctxt); - mhi_log(MHI_MSG_INFO, - "MHI state %s, done\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state)); - goto exit; - } else { - if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_CRITICAL, - "Failed to resume link\n"); - r = -EIO; - goto exit; - } - - write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); - mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n"); - if (mhi_dev_ctxt->flags.pending_M3) { - mhi_log(MHI_MSG_INFO, - "Pending M3 detected, aborting M0 procedure\n"); - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, - flags); - r = -EPERM; - goto exit; - } - if (mhi_dev_ctxt->flags.link_up) { - mhi_dev_ctxt->flags.pending_M0 = 1; - mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0); - } - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); - r = wait_event_interruptible_timeout( - *mhi_dev_ctxt->mhi_ev_wq.m0_event, - mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || - mhi_dev_ctxt->mhi_state == MHI_STATE_M1, - msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT)); - WARN_ON(!r || -ERESTARTSYS == r); - if (!r || -ERESTARTSYS == r) - mhi_log(MHI_MSG_ERROR, - "Failed to get M0 event ret %d\n", r); - r = 0; - } -exit: - mutex_unlock(&mhi_dev_ctxt->pm_lock); - mhi_log(MHI_MSG_INFO, "Exited...\n"); - return r; -} - -int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - - unsigned long flags; - int r = 0, abort_m3 = 0; - - mhi_log(MHI_MSG_INFO, - "Entered MHI state %s, Pending M0 %d Pending M3 %d\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - mhi_dev_ctxt->flags.pending_M0, - mhi_dev_ctxt->flags.pending_M3); - mutex_lock(&mhi_dev_ctxt->pm_lock); - switch (mhi_dev_ctxt->mhi_state) { - case MHI_STATE_RESET: - mhi_log(MHI_MSG_INFO, - "MHI in RESET turning link off and quitting\n"); - mhi_turn_off_pcie_link(mhi_dev_ctxt); - r = mhi_set_bus_request(mhi_dev_ctxt, 0); - if (r) - mhi_log(MHI_MSG_INFO, - "Failed to set bus freq ret %d\n", r); - goto exit; - case MHI_STATE_M0: - case MHI_STATE_M1: - case MHI_STATE_M2: - write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); - mhi_log(MHI_MSG_INFO, - "Triggering wake out of M2\n"); - mhi_dev_ctxt->flags.pending_M3 = 1; - if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) { - mhi_log(MHI_MSG_INFO, - "M2 transition not set\n"); - mhi_assert_device_wake(mhi_dev_ctxt); - } - - if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) { - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, - flags); - r = wait_event_interruptible_timeout( - *mhi_dev_ctxt->mhi_ev_wq.m0_event, - mhi_dev_ctxt->mhi_state == MHI_STATE_M0, - msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT)); - write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); - if (0 == r || -ERESTARTSYS == r) { - mhi_log(MHI_MSG_CRITICAL, 
- "MDM failed to come out of M2.\n"); - mhi_dev_ctxt->counters.m2_event_timeouts++; - r = -EAGAIN; - goto unlock; - } - } - break; - case MHI_STATE_M3: - mhi_log(MHI_MSG_INFO, - "MHI state %s, link state %d.\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - mhi_dev_ctxt->flags.link_up); - if (mhi_dev_ctxt->flags.link_up) - r = -EAGAIN; - else - r = 0; - goto exit; - default: - write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); - mhi_log(MHI_MSG_INFO, - "MHI state %s, link state %d.\n", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - mhi_dev_ctxt->flags.link_up); - break; - } - - if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) { - mhi_log(MHI_MSG_INFO, - "There are still %d acks pending from device\n", - atomic_read(&mhi_dev_ctxt->counters.outbound_acks)); - __pm_stay_awake(&mhi_dev_ctxt->w_lock); - __pm_relax(&mhi_dev_ctxt->w_lock); - abort_m3 = 1; - r = -EAGAIN; - goto unlock; - } - - if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) { - abort_m3 = 1; - r = -EAGAIN; - goto unlock; - } - - if (mhi_dev_ctxt->flags.pending_M0) { - r = -EAGAIN; - goto unlock; - } - mhi_dev_ctxt->flags.pending_M3 = 1; - - mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3); - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); - mhi_log(MHI_MSG_INFO, - "Waiting for M3 completion.\n"); - r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event, - mhi_dev_ctxt->mhi_state == MHI_STATE_M3, - msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT)); - switch (r) { - case 0: - mhi_log(MHI_MSG_CRITICAL, - "MDM failed to suspend after %d ms\n", - MHI_MAX_SUSPEND_TIMEOUT); - mhi_dev_ctxt->counters.m3_event_timeouts++; - mhi_dev_ctxt->flags.pending_M3 = 0; - goto exit; - default: - mhi_log(MHI_MSG_INFO, - "M3 completion received\n"); - break; - } - mhi_turn_off_pcie_link(mhi_dev_ctxt); - r = mhi_set_bus_request(mhi_dev_ctxt, 0); - if (r) - mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r); - goto exit; -unlock: - mhi_dev_ctxt->flags.pending_M3 = 0; - if (abort_m3) { - atomic_inc(&mhi_dev_ctxt->flags.data_pending); - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); - ring_all_chan_dbs(mhi_dev_ctxt, false); - ring_all_cmd_dbs(mhi_dev_ctxt); - atomic_dec(&mhi_dev_ctxt->flags.data_pending); - mhi_deassert_device_wake(mhi_dev_ctxt); - } else { - write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); - } -exit: - mhi_dev_ctxt->flags.pending_M3 = 0; - mutex_unlock(&mhi_dev_ctxt->pm_lock); - return r; -} diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c index 3316b2694896..c5c025b8585a 100644 --- a/drivers/platform/msm/mhi/mhi_sys.c +++ b/drivers/platform/msm/mhi/mhi_sys.c @@ -21,9 +21,9 @@ enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR; #ifdef CONFIG_MSM_MHI_DEBUG - enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE; +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE; #else - enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR; +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR; #endif unsigned int mhi_log_override; @@ -58,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, int valid_chan = 0; struct mhi_chan_ctxt *cc_list; struct mhi_client_handle *client_handle; + int pkts_queued; if (NULL == mhi_dev_ctxt) return -EIO; @@ -86,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, &v_wp_index); + pkts_queued = client_handle->chan_info.max_desc - + get_nr_avail_ring_elements(&mhi_dev_ctxt-> + mhi_local_chan_ctxt[*offp]) - 1; amnt_copied = 
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c index 3316b2694896..c5c025b8585a 100644 --- a/drivers/platform/msm/mhi/mhi_sys.c +++ b/drivers/platform/msm/mhi/mhi_sys.c @@ -21,9 +21,9 @@ enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR; #ifdef CONFIG_MSM_MHI_DEBUG - enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE; +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE; #else - enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR; +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR; #endif unsigned int mhi_log_override; @@ -58,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, int valid_chan = 0; struct mhi_chan_ctxt *cc_list; struct mhi_client_handle *client_handle; + int pkts_queued; if (NULL == mhi_dev_ctxt) return -EIO; @@ -86,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, &v_wp_index); + pkts_queued = client_handle->chan_info.max_desc - + get_nr_avail_ring_elements(&mhi_dev_ctxt-> + mhi_local_chan_ctxt[*offp]) - 1; amnt_copied = scnprintf(mhi_dev_ctxt->chan_info, - MHI_LOG_SIZE, - "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n", - "chan:", - (unsigned int)*offp, - "pkts from dev:", - mhi_dev_ctxt->counters.chan_pkts_xferd[*offp], - "state:", - chan_ctxt->mhi_chan_state, - "p_base:", - chan_ctxt->mhi_trb_ring_base_addr, - "v_base:", - mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base, - "v_wp:", - mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, - "index:", - v_wp_index, - "v_rp:", - mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp, - "index:", - v_rp_index, - "pkts_queued", - get_nr_avail_ring_elements( - &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]), - "/", - client_handle->chan_info.max_desc, - "bb_used:", - mhi_dev_ctxt->counters.bb_used[*offp]); + MHI_LOG_SIZE, + "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n", + "chan:", + (unsigned int)*offp, + "pkts from dev:", + mhi_dev_ctxt->counters.chan_pkts_xferd[*offp], + "state:", + chan_ctxt->chstate, + "p_base:", + chan_ctxt->mhi_trb_ring_base_addr, + "v_base:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base, + "v_wp:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, + "index:", + v_wp_index, + "v_rp:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp, + "index:", + v_rp_index, + "pkts_queued", + pkts_queued, + "/", + client_handle->chan_info.max_desc, + "bb_used:", + mhi_dev_ctxt->counters.bb_used[*offp]); *offp += 1; @@ -236,35 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf, msleep(100); amnt_copied = scnprintf(mhi_dev_ctxt->chan_info, - MHI_LOG_SIZE, - "%s %s %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d\n", - "Our State:", - TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), - "M0->M1:", - mhi_dev_ctxt->counters.m0_m1, - "M0<-M1:", - mhi_dev_ctxt->counters.m1_m0, - "M1->M2:", - mhi_dev_ctxt->counters.m1_m2, - "M0<-M2:", - mhi_dev_ctxt->counters.m2_m0, - "M0->M3:", - mhi_dev_ctxt->counters.m0_m3, - "M0<-M3:", - mhi_dev_ctxt->counters.m3_m0, - "M3_ev_TO:", - mhi_dev_ctxt->counters.m3_event_timeouts, - "M0_ev_TO:", - mhi_dev_ctxt->counters.m0_event_timeouts, - "outstanding_acks:", - atomic_read(&mhi_dev_ctxt->counters.outbound_acks), - "LPM:", - mhi_dev_ctxt->enable_lpm); + MHI_LOG_SIZE, + "%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n", + "MHI State:", + TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state), + "PM State:", + mhi_dev_ctxt->mhi_pm_state, + "M0->M1:", + mhi_dev_ctxt->counters.m0_m1, + "M1->M2:", + mhi_dev_ctxt->counters.m1_m2, + "M2->M0:", + mhi_dev_ctxt->counters.m2_m0, + "M0->M3:", + mhi_dev_ctxt->counters.m0_m3, + "M1->M3:", + mhi_dev_ctxt->counters.m1_m3, + "M3->M0:", + mhi_dev_ctxt->counters.m3_m0, + "device_wake:", + atomic_read(&mhi_dev_ctxt->counters.device_wake), + "usage_count:", + atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev. + power.usage_count), + "outbound_acks:", + atomic_read(&mhi_dev_ctxt->counters.outbound_acks)); if (amnt_copied < count) return amnt_copied - copy_to_user(buf, mhi_dev_ctxt->chan_info, amnt_copied); else return -ENOMEM; + return 0; } static const struct file_operations mhi_dbgfs_state_fops = {
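Both debugfs readers above share the same return convention: scnprintf() bounds the formatted text to the scratch buffer, the read fails with -ENOMEM if the user buffer cannot take it, and otherwise it returns the bytes actually delivered, i.e. the formatted length minus whatever copy_to_user() failed to write. A condensed sketch of that convention, with a toy buffer and format rather than the driver's:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t toy_dbgfs_read(char __user *buf, size_t count,
			      char *scratch, size_t scratch_len)
{
	int len = scnprintf(scratch, scratch_len, "state: %s\n", "M0");

	if ((size_t)len >= count)
		return -ENOMEM;	/* mirrors the driver's choice of errno */
	/* copy_to_user() returns the number of bytes NOT copied. */
	return len - copy_to_user(buf, scratch, len);
}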
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h index 263b8bba9e2c..f8ba31ea7573 100644 --- a/include/linux/msm_mhi.h +++ b/include/linux/msm_mhi.h @@ -215,7 +215,7 @@ int mhi_get_max_desc(struct mhi_client_handle *client_handle); /* RmNET Reserved APIs, This APIs are reserved for use by the linux network * stack only. Use by other clients will introduce system wide issues */ -int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm); +int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm); int mhi_get_epid(struct mhi_client_handle *mhi_handle); struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle); void mhi_mask_irq(struct mhi_client_handle *client_handle);
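Finally, the pkts_queued arithmetic introduced in mhi_dbgfs_chan_read() earlier is the usual occupancy formula for a ring that keeps one element unused to distinguish full from empty: queued = capacity - free - 1. A toy illustration of why that works; the indexes and length here are simplified stand-ins for the driver's ring accounting:

/* Free slots between read and write pointers, one slot reserved. */
static inline unsigned int toy_ring_free(unsigned int rp, unsigned int wp,
					 unsigned int len)
{
	return (rp + len - wp - 1) % len;
}

/* Occupancy, matching pkts_queued = max_desc - avail - 1 above. */
static inline unsigned int toy_ring_used(unsigned int len, unsigned int free)
{
	return len - free - 1;
}

Checking the corner cases: with rp == wp (empty), toy_ring_free() yields len - 1 and toy_ring_used() yields 0; with wp one slot behind rp (full), free is 0 and used is len - 1, the maximum for a ring that sacrifices one sentinel element.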