diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi.txt b/Documentation/devicetree/bindings/mhi/msm_mhi.txt index 0c49fc7a5fc2..36d8866b7354 100644 --- a/Documentation/devicetree/bindings/mhi/msm_mhi.txt +++ b/Documentation/devicetree/bindings/mhi/msm_mhi.txt @@ -11,7 +11,6 @@ Required properties: below properties: - esoc-names - esoc-0 - - wakeup-gpios: gpio used to wake device from low power mode. - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for below optional properties: - qcom,msm-bus,name @@ -28,8 +27,6 @@ Example: compatible = "qcom,mhi"; esoc-names = "mdm"; esoc-0 = <&mdm1>; - mhi-device-wake-gpio = - <&msmgpio 108 0>; qcom,msm-bus,name = "mhi"; qcom,msm-bus,num-cases = <2>; qcom,msm-bus,num-paths = <1>; diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h index 3c4d2500a1dc..7d55b6781f76 100644 --- a/drivers/platform/msm/mhi/mhi.h +++ b/drivers/platform/msm/mhi/mhi.h @@ -49,7 +49,6 @@ struct pcie_core_info { void __iomem *bar0_end; void __iomem *bar2_base; void __iomem *bar2_end; - u32 device_wake_gpio; u32 irq_base; u32 max_nr_msis; struct pci_saved_state *pcie_state; @@ -252,7 +251,7 @@ enum MHI_EVENT_CCS { MHI_EVENT_CC_OOB = 0x5, MHI_EVENT_CC_DB_MODE = 0x6, MHI_EVENT_CC_UNDEFINED_ERR = 0x10, - MHI_EVENT_CC_RING_EL_ERR = 0x11, + MHI_EVENT_CC_BAD_TRE = 0x11, }; struct mhi_ring { @@ -357,12 +356,14 @@ struct mhi_state_work_queue { enum STATE_TRANSITION buf[MHI_WORK_Q_MAX_SIZE]; }; -struct mhi_control_seg { - union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1]; - struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS]; - struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS]; - struct mhi_event_ctxt *mhi_ec_list; - u32 padding; +struct mhi_buf_info { + dma_addr_t bb_p_addr; + void *bb_v_addr; + void *client_buf; + size_t buf_len; + size_t filled_size; + enum dma_data_direction dir; + int bb_active; }; struct mhi_counters { @@ -384,6 +385,7 @@ struct mhi_counters { u32 *ev_counter; atomic_t outbound_acks; u32 chan_pkts_xferd[MHI_MAX_CHANNELS]; + u32 bb_used[MHI_MAX_CHANNELS]; }; struct mhi_flags { @@ -423,19 +425,37 @@ struct dev_mmio_info { u64 mmio_len; u32 nr_event_rings; dma_addr_t dma_ev_ctxt; /* Bus address of ECABAP*/ - void *dma_ev_rings; +}; + +struct mhi_ring_ctxt { + struct mhi_event_ctxt *ec_list; + struct mhi_chan_ctxt *cc_list; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t dma_ec_list; + dma_addr_t dma_cc_list; + dma_addr_t dma_cmd_ctxt; +}; + +struct mhi_dev_space { + void *dev_mem_start; + dma_addr_t dma_dev_mem_start; + size_t dev_mem_len; + struct mhi_ring_ctxt ring_ctxt; + dma_addr_t start_win_addr; + dma_addr_t end_win_addr; }; struct mhi_device_ctxt { enum MHI_STATE mhi_state; enum MHI_EXEC_ENV dev_exec_env; + struct mhi_dev_space dev_space; struct mhi_pcie_dev_info *dev_info; struct pcie_core_info *dev_props; - struct mhi_control_seg *mhi_ctrl_seg; - struct mhi_meminfo *mhi_ctrl_seg_info; + struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS]; struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS]; + struct mhi_ring *mhi_local_event_ctxt; struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS]; @@ -511,8 +531,14 @@ struct mhi_event_ring_cfg { irqreturn_t (*mhi_handler_ptr)(int , void *); }; +struct mhi_data_buf { + dma_addr_t bounce_buffer; + dma_addr_t client_buffer; + u32 bounce_flag; +}; + irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id); -enum MHI_STATUS mhi_reset_all_thread_queues( +int mhi_reset_all_thread_queues( struct mhi_device_ctxt *mhi_dev_ctxt); enum MHI_STATUS 
mhi_add_elements_to_event_rings( struct mhi_device_ctxt *mhi_dev_ctxt, @@ -521,20 +547,18 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring); enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1, void *loc_2, u32 *nr_el); enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt); -enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, +int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, struct mhi_device_ctxt *mhi_dev_ctxt); enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt, u32 nr_ev_el, u32 event_ring_index); /*Mhi Initialization functions */ -enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt, - enum MHI_INIT_ERROR_STAGE cleanup_stage); enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *dest_device, enum MHI_COMMAND which_cmd, u32 chan); enum MHI_STATUS mhi_queue_tx_pkt(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_CLIENT_CHANNEL chan, void *payload, size_t payload_size); -enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, +int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, uintptr_t trb_list_phy, uintptr_t trb_list_virt, u64 el_per_ring, @@ -545,11 +569,11 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt); int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan); -enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp, +int delete_element(struct mhi_ring *ring, void **rp, void **wp, void **assigned_addr); -enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, void **assigned_addr); -enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, void **assigned_addr); -enum MHI_STATUS get_element_index(struct mhi_ring *ring, void *address, +int ctxt_add_element(struct mhi_ring *ring, void **assigned_addr); +int ctxt_del_element(struct mhi_ring *ring, void **assigned_addr); +int get_element_index(struct mhi_ring *ring, void *address, uintptr_t *index); enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, struct mhi_ring *ring, enum MHI_RING_TYPE ring_type, u32 ring_index); @@ -565,8 +589,8 @@ enum MHI_STATUS mhi_test_for_device_ready( struct mhi_device_ctxt *mhi_dev_ctxt); enum MHI_STATUS mhi_test_for_device_reset( struct mhi_device_ctxt *mhi_dev_ctxt); -enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr); -enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr); +int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr); +int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr); int mhi_state_change_thread(void *ctxt); enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt, enum STATE_TRANSITION new_state); @@ -575,7 +599,6 @@ enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer); int mhi_pci_suspend(struct pci_dev *dev, pm_message_t state); int mhi_pci_resume(struct pci_dev *dev); int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev); -int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev); int mhi_init_pm_sysfs(struct device *dev); void mhi_rem_pm_sysfs(struct device *dev); void mhi_pci_remove(struct pci_dev *mhi_device); @@ -589,7 +612,7 @@ void mhi_notify_client(struct mhi_client_handle *client_handle, enum MHI_CB_REASON reason); int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt); int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt); -enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt 
*mhi_dev_ctxt); +int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt); int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action, void *hcpu); enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt); @@ -621,7 +644,7 @@ int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_TYPE_EVENT_RING type); void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt, int index); -int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt); -int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt); +void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt); +int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt); #endif diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c index a99d38e515fb..456b9c8e719f 100644 --- a/drivers/platform/msm/mhi/mhi_bhi.c +++ b/drivers/platform/msm/mhi/mhi_bhi.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "mhi_sys.h" #include "mhi.h" @@ -73,10 +74,12 @@ static ssize_t bhi_write(struct file *file, goto bhi_copy_error; } amount_copied = count; + /* Flush the writes, in anticipation of a device read */ wmb(); mhi_log(MHI_MSG_INFO, "Copied image from user at addr: %p\n", bhi_ctxt->image_loc); - bhi_ctxt->phy_image_loc = dma_map_single(NULL, + bhi_ctxt->phy_image_loc = dma_map_single( + &mhi_dev_ctxt->dev_info->plat_dev->dev, bhi_ctxt->image_loc, bhi_ctxt->image_size, DMA_TO_DEVICE); @@ -131,7 +134,8 @@ static ssize_t bhi_write(struct file *file, break; usleep_range(20000, 25000); } - dma_unmap_single(NULL, bhi_ctxt->phy_image_loc, + dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev, + bhi_ctxt->phy_image_loc, bhi_ctxt->image_size, DMA_TO_DEVICE); kfree(bhi_ctxt->unaligned_image_loc); @@ -168,7 +172,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device) bhi_ctxt->bhi_base = mhi_pcie_device->core.bar0_base; pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHIOFF); bhi_ctxt->bhi_base += pcie_word_val; - wmb(); mhi_log(MHI_MSG_INFO, "Successfully registered char dev.
bhi base is: 0x%p.\n", diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c index 25deabf77eff..871573f4a55a 100644 --- a/drivers/platform/msm/mhi/mhi_event.c +++ b/drivers/platform/msm/mhi/mhi_event.c @@ -73,30 +73,17 @@ dt_error: return r; } -int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt) +int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt) { - int r = 0, i; - struct mhi_event_ctxt *ev_ctxt = NULL; + int r = 0; - size_t ctxt_size = sizeof(struct mhi_event_ctxt) * - mhi_dev_ctxt->mmio_info.nr_event_rings; - /* Allocate the event contexts in uncached memory */ - mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list = - dma_alloc_coherent( - &mhi_dev_ctxt->dev_info->plat_dev->dev, - ctxt_size, - &mhi_dev_ctxt->mmio_info.dma_ev_ctxt, - GFP_KERNEL); - if (!mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list) + mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring) * + mhi_dev_ctxt->mmio_info.nr_event_rings, + GFP_KERNEL); + + if (!mhi_dev_ctxt->mhi_local_event_ctxt) return -ENOMEM; - mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring) * - mhi_dev_ctxt->mmio_info.nr_event_rings, - GFP_KERNEL); - if (!mhi_dev_ctxt->mhi_local_event_ctxt) { - r = -ENOMEM; - goto free_ec_list; - } mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) * mhi_dev_ctxt->mmio_info.nr_event_rings, GFP_KERNEL); @@ -111,73 +98,19 @@ int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt) r = -ENOMEM; goto free_ev_counter; } - - mhi_dev_ctxt->mmio_info.dma_ev_rings = kzalloc(sizeof(void *) * - mhi_dev_ctxt->mmio_info.nr_event_rings, - GFP_KERNEL); - if (!mhi_dev_ctxt->mmio_info.dma_ev_rings) { - r = -ENOMEM; - goto free_msi_counter; - } - mhi_log(MHI_MSG_INFO, "Allocated ECABAP at Virt: 0x%p, Phys 0x%lx\n", - mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list, - (uintptr_t)mhi_dev_ctxt->mmio_info.dma_ev_ctxt); - - /* Allocate event ring elements for each ring */ - for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) { - dma_addr_t ring_base_addr; - ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i]; - mhi_dev_ctxt->mhi_local_event_ctxt[i].base = - dma_alloc_coherent( - &mhi_dev_ctxt->dev_info->plat_dev->dev, - sizeof(union mhi_event_pkt) * - mhi_dev_ctxt->ev_ring_props[i].nr_desc, - &ring_base_addr, - GFP_KERNEL); - if (!mhi_dev_ctxt->mhi_local_event_ctxt[i].base) { - r = -ENOMEM; - goto free_event_ring; - } - - ev_ctxt->mhi_event_ring_base_addr = ring_base_addr; - ev_ctxt->mhi_event_read_ptr = ring_base_addr; - ev_ctxt->mhi_event_write_ptr = ring_base_addr; - - mhi_dev_ctxt->mhi_local_event_ctxt[i].wp = - mhi_dev_ctxt->mhi_local_event_ctxt[i].base; - mhi_dev_ctxt->mhi_local_event_ctxt[i].rp = - mhi_dev_ctxt->mhi_local_event_ctxt[i].base; - mhi_log(MHI_MSG_INFO, "Allocated Event Ring %d\n", i); - } return r; -free_event_ring: - for (; i > 0; --i) { - ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i]; - dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, - sizeof(union mhi_event_pkt *) * - mhi_dev_ctxt->ev_ring_props[i].nr_desc, - mhi_dev_ctxt->mhi_local_event_ctxt[i].base, - ev_ctxt->mhi_event_ring_base_addr); - } - kfree(mhi_dev_ctxt->mmio_info.dma_ev_rings); -free_msi_counter: - kfree(mhi_dev_ctxt->counters.msi_counter); free_ev_counter: kfree(mhi_dev_ctxt->counters.ev_counter); free_local_ec_list: kfree(mhi_dev_ctxt->mhi_local_event_ctxt); -free_ec_list: - dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, - ctxt_size, - mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list, - mhi_dev_ctxt->mmio_info.dma_ev_ctxt); return r; } void ring_ev_db(struct
mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index) { struct mhi_ring *event_ctxt = NULL; u64 db_value = 0; + event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index]; db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING, @@ -203,22 +136,20 @@ static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list, return MHI_STATUS_SUCCESS; } -int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt) +void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt) { int i; struct mhi_ring *mhi_local_event_ctxt = NULL; struct mhi_event_ctxt *event_ctxt; - struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg; for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) { - event_ctxt = &mhi_ctrl->mhi_ec_list[i]; + event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i]; mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i]; mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt, mhi_dev_ctxt->ev_ring_props[i].nr_desc, mhi_dev_ctxt->ev_ring_props[i].intmod, mhi_dev_ctxt->ev_ring_props[i].msi_vec); } - return 0; } int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt, @@ -291,7 +222,8 @@ enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt, mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n", mhi_dev_ctxt->mmio_info.mmio_addr, mhi_dev_ctxt->mmio_info.mmio_len); - mhi_log(MHI_MSG_INFO, "Initializing event ring %d\n", ring_index); + mhi_log(MHI_MSG_INFO, "Initializing event ring %d with %d desc\n", + ring_index, nr_ev_el); for (i = 0; i < nr_ev_el - 1; ++i) { ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt); @@ -312,16 +244,20 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt, { struct mhi_event_ctxt *ev_ctxt; struct mhi_ring *local_ev_ctxt; + mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index); ev_ctxt = - &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[index]; + &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index]; local_ev_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[index]; ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr; ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr; local_ev_ctxt->rp = local_ev_ctxt->base; local_ev_ctxt->wp = local_ev_ctxt->base; /* Flush writes to MMIO */ wmb(); } - diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c index 1f4b319d1571..6b37ba853a84 100644 --- a/drivers/platform/msm/mhi/mhi_iface.c +++ b/drivers/platform/msm/mhi/mhi_iface.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #define CREATE_TRACE_POINTS #include "mhi_trace.h" @@ -64,8 +66,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) { int ret_val = 0; u32 i = 0, j = 0; - u32 retry_count = 0; - u32 msi_number = 32; + u32 requested_msi_number = 32, actual_msi_number = 0; struct mhi_device_ctxt *mhi_dev_ctxt = NULL; struct pci_dev *pcie_device = NULL; @@ -74,15 +75,14 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) pcie_device = mhi_pcie_dev->pcie_device; ret_val = mhi_init_pcie_device(mhi_pcie_dev); - if (0 != ret_val) { + if (ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to initialize pcie device, ret %d\n", ret_val); return -ENODEV; } - ret_val = mhi_init_device_ctxt(mhi_pcie_dev, - &mhi_pcie_dev->mhi_ctxt); - if (MHI_STATUS_SUCCESS != ret_val) { + ret_val =
mhi_init_device_ctxt(mhi_pcie_dev, &mhi_pcie_dev->mhi_ctxt); + if (ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to initialize main MHI ctxt ret %d\n", ret_val); @@ -112,12 +112,20 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) } device_disable_async_suspend(&pcie_device->dev); - ret_val = pci_enable_msi_range(pcie_device, 0, msi_number); - if (0 != ret_val) { + ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number); + if (IS_ERR_VALUE(ret_val)) { mhi_log(MHI_MSG_ERROR, "Failed to enable MSIs for pcie dev ret_val %d.\n", ret_val); goto msi_config_err; + } else if (ret_val < requested_msi_number) { + mhi_log(MHI_MSG_INFO, + "Got fewer MSIs than requested. Requested %d, got %d.\n", + requested_msi_number, ret_val); + actual_msi_number = ret_val; + } else { + mhi_log(MHI_MSG_VERBOSE, + "Got all requested MSIs, moving on\n"); + actual_msi_number = ret_val; } mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt; @@ -142,23 +150,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) mhi_pcie_dev->core.irq_base = pcie_device->irq; mhi_log(MHI_MSG_VERBOSE, "Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base); - mhi_pcie_dev->core.max_nr_msis = msi_number; - do { - ret_val = mhi_init_gpios(mhi_pcie_dev); - switch (ret_val) { - case -EPROBE_DEFER: - mhi_log(MHI_MSG_VERBOSE, - "DT requested probe defer, wait and retry\n"); - break; - case 0: - break; - default: - mhi_log(MHI_MSG_CRITICAL, - "Could not get gpio from struct device tree!\n"); - goto msi_config_err; - } - retry_count++; - } while ((retry_count < DT_WAIT_RETRIES) && (ret_val == -EPROBE_DEFER)); + mhi_pcie_dev->core.max_nr_msis = requested_msi_number; ret_val = mhi_init_pm_sysfs(&pcie_device->dev); if (ret_val != 0) { mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs.\n"); @@ -189,17 +181,26 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) return ret_val; mhi_state_transition_error: - if (MHI_STATUS_SUCCESS != mhi_clean_init_stage(&mhi_pcie_dev->mhi_ctxt, - MHI_INIT_ERROR_STAGE_UNWIND_ALL)) - mhi_log(MHI_MSG_ERROR, "Could not clean up context\n"); + kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock); + kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq); + kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event); + dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, + mhi_dev_ctxt->dev_space.dev_mem_len, + mhi_dev_ctxt->dev_space.dev_mem_start, + mhi_dev_ctxt->dev_space.dma_dev_mem_start); + kfree(mhi_dev_ctxt->mhi_cmd_mutex_list); + kfree(mhi_dev_ctxt->mhi_chan_mutex); + kfree(mhi_dev_ctxt->mhi_ev_spinlock_list); + kfree(mhi_dev_ctxt->ev_ring_props); mhi_rem_pm_sysfs(&pcie_device->dev); sysfs_config_err: - gpio_free(mhi_pcie_dev->core.device_wake_gpio); for (; i >= 0; --i) free_irq(pcie_device->irq + i, &pcie_device->dev); debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder); msi_config_err: - pci_disable_msi(pcie_device); pci_disable_device(pcie_device); return ret_val; } @@ -255,6 +256,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device, static int mhi_plat_probe(struct platform_device *pdev) { u32 nr_dev = mhi_devices.nr_of_devices; + mhi_log(MHI_MSG_INFO, "Entered\n"); mhi_devices.device_list[nr_dev].plat_dev = pdev; mhi_log(MHI_MSG_INFO, "Exited\n"); diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c index b7f118b05381..15ef91fae4df 100644 --- a/drivers/platform/msm/mhi/mhi_init.c +++ b/drivers/platform/msm/mhi/mhi_init.c @@ -20,40 +20,11 @@ #include #include
#include +#include -enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt, - enum MHI_INIT_ERROR_STAGE cleanup_stage) +static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt) { - enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; - switch (cleanup_stage) { - case MHI_INIT_ERROR_STAGE_UNWIND_ALL: - case MHI_INIT_ERROR_TIMERS: - case MHI_INIT_ERROR_STAGE_DEVICE_CTRL: - mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info); - case MHI_INIT_ERROR_STAGE_THREAD_QUEUES: - case MHI_INIT_ERROR_STAGE_THREADS: - kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq); - kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event); - kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event); - case MHI_INIT_ERROR_STAGE_EVENTS: - kfree(mhi_dev_ctxt->mhi_ctrl_seg_info); - case MHI_INIT_ERROR_STAGE_MEM_ZONES: - kfree(mhi_dev_ctxt->mhi_cmd_mutex_list); - kfree(mhi_dev_ctxt->mhi_chan_mutex); - kfree(mhi_dev_ctxt->mhi_ev_spinlock_list); - case MHI_INIT_ERROR_STAGE_SYNC: - kfree(mhi_dev_ctxt->ev_ring_props); - break; - default: - ret_val = MHI_STATUS_ERROR; - break; - } - return ret_val; -} - -static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - u32 i = 0; + int i; mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) * mhi_dev_ctxt->mmio_info.nr_event_rings, @@ -85,7 +56,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt) mutex_init(&mhi_dev_ctxt->mhi_link_state); mutex_init(&mhi_dev_ctxt->pm_lock); atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0); - return MHI_STATUS_SUCCESS; + return 0; db_write_lock_free: kfree(mhi_dev_ctxt->mhi_cmd_mutex_list); @@ -94,21 +65,327 @@ cmd_mutex_free: chan_mutex_free: kfree(mhi_dev_ctxt->mhi_ev_spinlock_list); ev_mutex_free: - return MHI_STATUS_ALLOC_ERROR; + return -ENOMEM; } -static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info, - struct mhi_device_ctxt *mhi_dev_ctxt) +size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt) { - mhi_dev_ctxt->mhi_ctrl_seg_info = kmalloc(sizeof(struct mhi_meminfo), GFP_KERNEL); - if (NULL == mhi_dev_ctxt->mhi_ctrl_seg_info) - return MHI_STATUS_ALLOC_ERROR; - mhi_dev_ctxt->mhi_ctrl_seg_info->dev = &dev_info->pcie_device->dev; - return MHI_STATUS_SUCCESS; + int i, r; + size_t mhi_dev_mem = 0; + struct mhi_chan_info chan_info; + + /* Calculate size needed for contexts */ + mhi_dev_mem += (MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt)) + + (NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt)) + + (mhi_dev_ctxt->mmio_info.nr_event_rings * + sizeof(struct mhi_event_ctxt)); + mhi_log(MHI_MSG_INFO, "Reserved %zd bytes for context info\n", + mhi_dev_mem); + /* Calculate size needed for cmd TREs */ + mhi_dev_mem += (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt)); + + /* Calculate size needed for event TREs */ + for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) + mhi_dev_mem += (sizeof(union mhi_event_pkt) * + mhi_dev_ctxt->ev_ring_props[i].nr_desc); + + /* Calculate size needed for xfer TREs and bounce buffers */ + for (i = 0; i < MHI_MAX_CHANNELS; ++i) + if (VALID_CHAN_NR(i)) { + r = get_chan_props(mhi_dev_ctxt, i, &chan_info); + if (r) + continue; + /* Add size of TREs */ + mhi_dev_mem += (sizeof(union mhi_xfer_pkt) * + chan_info.max_desc); + /* Add bounce buffer size */ + if (mhi_dev_ctxt->flags.bb_enabled) { + mhi_log(MHI_MSG_INFO, + "Enabling BB list, chan %d\n", i); + /*mhi_dev_mem += (MAX_BOUNCE_BUF_SIZE * + chan_info.max_desc); */ + } + } + mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n", + mhi_dev_mem); + return mhi_dev_mem; } -static enum
MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt) +void init_dev_ev_ctxt(struct mhi_event_ctxt *ev_ctxt, + dma_addr_t p_base_addr, size_t len) +{ + ev_ctxt->mhi_event_ring_base_addr = p_base_addr; + ev_ctxt->mhi_event_read_ptr = p_base_addr; + ev_ctxt->mhi_event_write_ptr = p_base_addr; + ev_ctxt->mhi_event_ring_len = len; +} + +void init_local_ev_ctxt(struct mhi_ring *ev_ctxt, + void *v_base_addr, size_t len) +{ + ev_ctxt->base = v_base_addr; + ev_ctxt->rp = v_base_addr; + ev_ctxt->wp = v_base_addr; + ev_ctxt->len = len; + ev_ctxt->el_size = sizeof(union mhi_event_pkt); + ev_ctxt->overwrite_en = 0; +} + +void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt, + dma_addr_t p_base_addr, size_t len, int ev_index) +{ + chan_ctxt->mhi_trb_ring_base_addr = p_base_addr; + chan_ctxt->mhi_trb_read_ptr = p_base_addr; + chan_ctxt->mhi_trb_write_ptr = p_base_addr; + chan_ctxt->mhi_trb_ring_len = len; + /* Prepopulate the channel ctxt */ + chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED; + chan_ctxt->mhi_event_ring_index = ev_index; +} + +void init_local_chan_ctxt(struct mhi_ring *chan_ctxt, + void *v_base_addr, size_t len) +{ + chan_ctxt->base = v_base_addr; + chan_ctxt->rp = v_base_addr; + chan_ctxt->wp = v_base_addr; + chan_ctxt->len = len; + chan_ctxt->el_size = sizeof(union mhi_xfer_pkt); + chan_ctxt->overwrite_en = 0; +} + +int populate_bb_list(struct list_head *bb_list, int num_bb) +{ + struct mhi_buf_info *mhi_buf = NULL; + int i; + + for (i = 0; i < num_bb; ++i) { + mhi_buf = kzalloc(sizeof(struct mhi_buf_info), GFP_KERNEL); + if (!mhi_buf) + return -ENOMEM; + mhi_buf->bb_p_addr = 0; + mhi_buf->bb_v_addr = NULL; + mhi_log(MHI_MSG_INFO, + "Allocated BB v_addr 0x%p, p_addr 0x%llx\n", + mhi_buf->bb_v_addr, (u64)mhi_buf->bb_p_addr); + } + return 0; +} +/** + * mhi_cmd_ring_init- Initialization of the command ring + * + * @cmd_ctxt: command ring context to initialize + * @trb_list_virt_addr: Pointer to the virtual address of the tre ring + * @trb_list_phy_addr: Pointer to the dma address of the tre ring + * @ring_size: Ring size + * @ring: Pointer to the shadow command context + * + * @Return 0 on success + */ +static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt, + void *trb_list_virt_addr, + dma_addr_t trb_list_phy_addr, + size_t ring_size, struct mhi_ring *ring) +{ + cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_len = ring_size; + ring[PRIMARY_CMD_RING].wp = trb_list_virt_addr; + ring[PRIMARY_CMD_RING].rp = trb_list_virt_addr; + ring[PRIMARY_CMD_RING].base = trb_list_virt_addr; + ring[PRIMARY_CMD_RING].len = ring_size; + ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt); + ring[PRIMARY_CMD_RING].overwrite_en = 0; + return 0; +} + + +static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el) +{ + bb_ctxt->el_size = sizeof(struct mhi_buf_info); + bb_ctxt->len = bb_ctxt->el_size * nr_el; + bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL); + if (!bb_ctxt->base) + return -ENOMEM; + bb_ctxt->wp = bb_ctxt->base; + bb_ctxt->rp = bb_ctxt->base; + bb_ctxt->ack_rp = bb_ctxt->base; + return 0; +} + +static void calculate_mhi_addressing_window( + struct mhi_device_ctxt *mhi_dev_ctxt) +{ + dma_addr_t dma_dev_mem_start; + dma_addr_t dma_seg_size = 0x1FF00000UL; + dma_addr_t dma_max_addr = (dma_addr_t)(-1); + + dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start; + + /* Centre a window of 2 * dma_seg_size bytes on the allocation, + * clamping to the bottom or top of the DMA address space when the + * allocation sits within one segment of either end. */ + if (dma_dev_mem_start < dma_seg_size) {
mhi_dev_ctxt->dev_space.start_win_addr = 0; + mhi_dev_ctxt->dev_space.end_win_addr = + dma_dev_mem_start + dma_seg_size + + (dma_seg_size - dma_dev_mem_start); + } else if (dma_dev_mem_start >= dma_seg_size && + dma_dev_mem_start <= (dma_max_addr - dma_seg_size)) { + mhi_dev_ctxt->dev_space.start_win_addr = + dma_dev_mem_start - dma_seg_size; + mhi_dev_ctxt->dev_space.end_win_addr = + dma_dev_mem_start + dma_seg_size; + } else if (dma_dev_mem_start > (dma_max_addr - dma_seg_size)) { + mhi_dev_ctxt->dev_space.start_win_addr = + dma_dev_mem_start - (dma_seg_size + + (dma_seg_size - (dma_max_addr - + dma_dev_mem_start))); + mhi_dev_ctxt->dev_space.end_win_addr = dma_max_addr; + } + mhi_log(MHI_MSG_INFO, + "MHI start address at 0x%llx, Window Start 0x%llx Window End 0x%llx\n", + (u64)dma_dev_mem_start, + (u64)mhi_dev_ctxt->dev_space.start_win_addr, + (u64)mhi_dev_ctxt->dev_space.end_win_addr); + +} + +int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + size_t mhi_mem_index = 0, ring_len; + void *dev_mem_start; + dma_addr_t dma_dev_mem_start; + int i, r; + + mhi_dev_ctxt->dev_space.dev_mem_len = + calculate_mhi_space(mhi_dev_ctxt); + + mhi_dev_ctxt->dev_space.dev_mem_start = + dma_alloc_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, + mhi_dev_ctxt->dev_space.dev_mem_len, + &mhi_dev_ctxt->dev_space.dma_dev_mem_start, + GFP_KERNEL); + if (!mhi_dev_ctxt->dev_space.dev_mem_start) { + mhi_log(MHI_MSG_ERROR, + "Failed to allocate memory of size %zd bytes\n", + mhi_dev_ctxt->dev_space.dev_mem_len); + return -ENOMEM; + } + dev_mem_start = mhi_dev_ctxt->dev_space.dev_mem_start; + dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start; + memset(dev_mem_start, 0, mhi_dev_ctxt->dev_space.dev_mem_len); + + calculate_mhi_addressing_window(mhi_dev_ctxt); + + mhi_log(MHI_MSG_INFO, "Starting Seg address: virt 0x%p, dma 0x%llx\n", + dev_mem_start, (u64)dma_dev_mem_start); + + mhi_log(MHI_MSG_INFO, "Initializing CCABAP at virt 0x%p, dma 0x%llx\n", + dev_mem_start + mhi_mem_index, + (u64)dma_dev_mem_start + mhi_mem_index); + mhi_dev_ctxt->dev_space.ring_ctxt.cc_list = dev_mem_start; + mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list = dma_dev_mem_start; + mhi_mem_index += MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt); + + mhi_log(MHI_MSG_INFO, "Initializing CRCBAP at virt 0x%p, dma 0x%llx\n", + dev_mem_start + mhi_mem_index, + (u64)dma_dev_mem_start + mhi_mem_index); + + mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt = + dev_mem_start + mhi_mem_index; + mhi_dev_ctxt->dev_space.ring_ctxt.dma_cmd_ctxt = + dma_dev_mem_start + mhi_mem_index; + mhi_mem_index += NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt); + + mhi_log(MHI_MSG_INFO, "Initializing ECABAP at virt 0x%p, dma 0x%llx\n", + dev_mem_start + mhi_mem_index, + (u64)dma_dev_mem_start + mhi_mem_index); + mhi_dev_ctxt->dev_space.ring_ctxt.ec_list = + dev_mem_start + mhi_mem_index; + mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list = + dma_dev_mem_start + mhi_mem_index; + mhi_mem_index += mhi_dev_ctxt->mmio_info.nr_event_rings * + sizeof(struct mhi_event_ctxt); + + mhi_log(MHI_MSG_INFO, + "Initializing CMD context at virt 0x%p, dma 0x%llx\n", + dev_mem_start + mhi_mem_index, + (u64)dma_dev_mem_start + mhi_mem_index); + + /* TODO: Initialize both the local and device cmd context */ + ring_len = (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt)); + mhi_cmd_ring_init(mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt, + dev_mem_start + mhi_mem_index, + dma_dev_mem_start + mhi_mem_index, + ring_len, + mhi_dev_ctxt->mhi_local_cmd_ctxt); + mhi_mem_index += ring_len; + + 
/* Initialize both the local and device event contexts */ + for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) { + ring_len = sizeof(union mhi_event_pkt) * + mhi_dev_ctxt->ev_ring_props[i].nr_desc; + init_dev_ev_ctxt(&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i], + dma_dev_mem_start + mhi_mem_index, + ring_len); + init_local_ev_ctxt(&mhi_dev_ctxt->mhi_local_event_ctxt[i], + dev_mem_start + mhi_mem_index, + ring_len); + mhi_log(MHI_MSG_INFO, + "Initializing EV_%d TRE list at virt 0x%p dma 0x%llx\n", + i, dev_mem_start + mhi_mem_index, + (u64)dma_dev_mem_start + mhi_mem_index); + mhi_mem_index += ring_len; + } + + /* Initialize both the local and device xfer contexts */ + for (i = 0; i < MHI_MAX_CHANNELS; ++i) + if (VALID_CHAN_NR(i)) { + struct mhi_chan_info chan_info; + + r = get_chan_props(mhi_dev_ctxt, i, &chan_info); + if (r) + continue; + mhi_log(MHI_MSG_INFO, "Initializing chan ctxt %d\n", i); + ring_len = (sizeof(union mhi_xfer_pkt) * + chan_info.max_desc); + init_dev_chan_ctxt( + &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i], + dma_dev_mem_start + mhi_mem_index, + ring_len, chan_info.ev_ring); + /* TODO: May not need to do this. It would be best for + * the client to set it during chan open */ + mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i]. + mhi_chan_type = (i % 2) + 1; + init_local_chan_ctxt( + &mhi_dev_ctxt->mhi_local_chan_ctxt[i], + dev_mem_start + mhi_mem_index, + ring_len); + /* TODO: May not need to do this. It would be best for + * the client to set it during chan open */ + mhi_dev_ctxt->mhi_local_chan_ctxt[i].dir = (i % 2) + 1; + /* Add size of TREs */ + mhi_mem_index += ring_len; + if (mhi_dev_ctxt->flags.bb_enabled) { + r = enable_bb_ctxt( + &mhi_dev_ctxt->chan_bb_list[i], + chan_info.max_desc); + if (r) + goto error_during_bb_list; + } + } + return 0; + +error_during_bb_list: + for (; i >= 0; --i) + kfree(mhi_dev_ctxt->chan_bb_list[i].base); + dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, + mhi_dev_ctxt->dev_space.dev_mem_len, + mhi_dev_ctxt->dev_space.dev_mem_start, + mhi_dev_ctxt->dev_space.dma_dev_mem_start); + return r; +} + +static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt) { mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc( @@ -155,7 +432,7 @@ static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt) init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m3_event); init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.bhi_event); - return MHI_STATUS_SUCCESS; + return 0; error_bhi_event: kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event); error_m0_event: @@ -167,7 +444,7 @@ error_event_handle_alloc: return MHI_STATUS_ERROR; } -static enum MHI_STATUS mhi_init_state_change_thread_work_queue( +static int mhi_init_state_change_thread_work_queue( struct mhi_state_work_queue *q) { bool lock_acquired = 0; @@ -176,7 +453,7 @@ static enum MHI_STATUS mhi_init_state_change_thread_work_queue( if (NULL == q->q_lock) { q->q_lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); if (NULL == q->q_lock) - return MHI_STATUS_ALLOC_ERROR; + return -ENOMEM; spin_lock_init(q->q_lock); } else { spin_lock_irqsave(q->q_lock, flags); @@ -192,124 +469,15 @@ static enum MHI_STATUS mhi_init_state_change_thread_work_queue( if (lock_acquired) spin_unlock_irqrestore(q->q_lock, flags); - return MHI_STATUS_SUCCESS; + return 0; } -static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt - *mhi_dev_ctxt) -{ - size_t ctrl_seg_size = 0; - size_t ctrl_seg_offset = 0; - int i = 0; - u32 align_len = sizeof(u64) * 2; - enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; - - if (NULL == 
mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info || - NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev) - return MHI_STATUS_ERROR; - - mhi_dev_ctxt->enable_lpm = 1; - mhi_dev_ctxt->flags.mhi_initialized = 0; - - mhi_log(MHI_MSG_INFO, "Allocating control segment.\n"); - ctrl_seg_size += sizeof(struct mhi_control_seg); - /* Calculate the size of the control segment needed */ - - ctrl_seg_size += align_len - (ctrl_seg_size % align_len); - ret_val = mhi_mallocmemregion(mhi_dev_ctxt, mhi_dev_ctxt->mhi_ctrl_seg_info, - ctrl_seg_size); - if (MHI_STATUS_SUCCESS != ret_val) - return MHI_STATUS_ERROR; - mhi_dev_ctxt->mhi_ctrl_seg = - mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info); - - if (!mhi_dev_ctxt->mhi_ctrl_seg) - return MHI_STATUS_ALLOC_ERROR; - - /* Set the channel contexts, event contexts and cmd context */ - ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg + - sizeof(struct mhi_control_seg); - - /* Set the channel direction and state */ - ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len); - for (i = 0; i < MHI_MAX_CHANNELS; ++i) { - mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]. - mhi_chan_type = (i % 2) + 1; - mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]. - mhi_chan_state = - MHI_CHAN_STATE_ENABLED; - } - return MHI_STATUS_SUCCESS; -} -/** - * mhi_cmd_ring_init- Initialization of the command ring - * - * @cmd_ctxt: command ring context to initialize - * @trb_list_phy_addr: Pointer to the pysical address of the tre ring - * @trb_list_virt_addr: Pointer to the virtual address of the tre ring - * @el_per_ring: Number of elements in this command ring - * @ring: Pointer to the shadow command context - * - * @Return MHI_STATUS - */ -static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt, - uintptr_t trb_list_phy_addr, - uintptr_t trb_list_virt_addr, - size_t el_per_ring, struct mhi_ring *ring) -{ - cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr; - cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr; - cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr; - cmd_ctxt->mhi_cmd_ring_len = - (size_t)el_per_ring*sizeof(union mhi_cmd_pkt); - ring[PRIMARY_CMD_RING].wp = (void *)trb_list_virt_addr; - ring[PRIMARY_CMD_RING].rp = (void *)trb_list_virt_addr; - ring[PRIMARY_CMD_RING].base = (void *)trb_list_virt_addr; - ring[PRIMARY_CMD_RING].len = - (size_t)el_per_ring*sizeof(union mhi_cmd_pkt); - ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt); - ring[PRIMARY_CMD_RING].overwrite_en = 0; - return MHI_STATUS_SUCCESS; -} - -static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - return MHI_STATUS_SUCCESS; -} - -static enum MHI_STATUS mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt) +static void mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt) { wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source"); - return MHI_STATUS_SUCCESS; } -static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt) -{ - int r = 0; - u64 phy_cmd_trb_addr; - - struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg; - - r = init_event_ctxt_array(mhi_dev_ctxt); - if (r) - return MHI_STATUS_ERROR; - - /* Init Command Ring */ - phy_cmd_trb_addr = - ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->cmd_trb_list[PRIMARY_CMD_RING] - - mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+ - mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned; - mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING], - phy_cmd_trb_addr, - (uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING], - CMD_EL_PER_RING, - 
&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]); - - mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; - return MHI_STATUS_SUCCESS; -} - -static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt) +static int mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt) { mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread, mhi_dev_ctxt, @@ -321,7 +489,7 @@ static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt) "mhi_st_thrd"); if (IS_ERR(mhi_dev_ctxt->event_thread_handle)) return MHI_STATUS_ERROR; - return MHI_STATUS_SUCCESS; + return 0; } /** @@ -335,14 +503,15 @@ static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt) * * @return MHI_STATUS */ -enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, +int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, struct mhi_device_ctxt *mhi_dev_ctxt) { int r = 0; if (NULL == dev_info || NULL == mhi_dev_ctxt) - return MHI_STATUS_ERROR; - mhi_log(MHI_MSG_VERBOSE, "mhi_init_device_ctxt>Init MHI dev ctxt\n"); + return -EINVAL; + + mhi_log(MHI_MSG_VERBOSE, "Entered\n"); mhi_dev_ctxt->dev_info = dev_info; mhi_dev_ctxt->dev_props = &dev_info->core; @@ -351,64 +520,72 @@ enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, if (r) { mhi_log(MHI_MSG_ERROR, "Failed to get event ring properties ret %d\n", r); - mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC); - return MHI_STATUS_ERROR; + goto error_during_props; } - - if (MHI_STATUS_SUCCESS != mhi_init_sync(mhi_dev_ctxt)) { + r = mhi_init_sync(mhi_dev_ctxt); + if (r) { mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n"); - mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC); - return MHI_STATUS_ERROR; + goto error_during_sync; } + r = create_local_ev_ctxt(mhi_dev_ctxt); + if (r) { + mhi_log(MHI_MSG_ERROR, + "Failed to initialize local event ctxt ret %d\n", r); + goto error_during_local_ev_ctxt; + } + r = init_mhi_dev_mem(mhi_dev_ctxt); + if (r) { + mhi_log(MHI_MSG_ERROR, + "Failed to initialize device memory ret %d\n", r); + goto error_during_dev_mem_init; + } + r = mhi_init_events(mhi_dev_ctxt); + if (r) { + mhi_log(MHI_MSG_ERROR, + "Failed to initialize mhi events ret %d\n", r); + goto error_wq_init; + } + r = mhi_reset_all_thread_queues(mhi_dev_ctxt); + if (r) { + mhi_log(MHI_MSG_ERROR, + "Failed to initialize work queues ret %d\n", r); + goto error_during_thread_init; + } + init_event_ctxt_array(mhi_dev_ctxt); + mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; + mhi_dev_ctxt->enable_lpm = 1; - if (MHI_STATUS_SUCCESS != mhi_init_ctrl_zone(dev_info, mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to initialize memory zones\n"); - mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_MEM_ZONES); - return MHI_STATUS_ERROR; + r = mhi_spawn_threads(mhi_dev_ctxt); + if (r) { + mhi_log(MHI_MSG_ERROR, "Failed to spawn threads ret %d\n", r); + goto error_during_thread_spawn; } - if (MHI_STATUS_SUCCESS != mhi_init_events(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi events\n"); - mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_EVENTS); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != mhi_reset_all_thread_queues(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to initialize work queues\n"); - mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_THREAD_QUEUES); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != mhi_init_device_ctrl(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to initialize ctrl seg\n"); - 
mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_THREAD_QUEUES); - return MHI_STATUS_ERROR; - } - create_ev_rings(mhi_dev_ctxt); + mhi_init_wakelock(mhi_dev_ctxt); - if (MHI_STATUS_SUCCESS != mhi_init_contexts(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed initializing contexts\n"); - mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_DEVICE_CTRL); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != mhi_spawn_threads(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to spawn threads\n"); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != mhi_init_timers(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed initializing timers\n"); - mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_DEVICE_CTRL); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != mhi_init_wakelock(mhi_dev_ctxt)) { - mhi_log(MHI_MSG_ERROR, "Failed to initialize wakelock\n"); - mhi_clean_init_stage(mhi_dev_ctxt, - MHI_INIT_ERROR_STAGE_DEVICE_CTRL); - return MHI_STATUS_ERROR; - } - return MHI_STATUS_SUCCESS; + return r; + +error_during_thread_spawn: + kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock); +error_during_thread_init: + kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq); + kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event); + kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event); +error_wq_init: + dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev, + mhi_dev_ctxt->dev_space.dev_mem_len, + mhi_dev_ctxt->dev_space.dev_mem_start, + mhi_dev_ctxt->dev_space.dma_dev_mem_start); +error_during_dev_mem_init: +error_during_local_ev_ctxt: + kfree(mhi_dev_ctxt->mhi_cmd_mutex_list); + kfree(mhi_dev_ctxt->mhi_chan_mutex); + kfree(mhi_dev_ctxt->mhi_ev_spinlock_list); +error_during_sync: + kfree(mhi_dev_ctxt->ev_ring_props); +error_during_props: + return r; } /** @@ -424,7 +601,7 @@ enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, * * @Return MHI_STATUS */ -enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, +int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, uintptr_t trb_list_phy, uintptr_t trb_list_virt, u64 el_per_ring, enum MHI_CHAN_TYPE chan_type, u32 event_ring, struct mhi_ring *ring, @@ -448,26 +625,25 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, ring->dir = chan_type; /* Flush writes to MMIO */ wmb(); - return MHI_STATUS_SUCCESS; + return 0; } -enum MHI_STATUS mhi_reset_all_thread_queues( +int mhi_reset_all_thread_queues( struct mhi_device_ctxt *mhi_dev_ctxt) { - enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + int ret_val = 0; - mhi_init_state_change_thread_work_queue( + ret_val = mhi_init_state_change_thread_work_queue( &mhi_dev_ctxt->state_change_work_item_list); - if (MHI_STATUS_SUCCESS != ret_val) { + if (ret_val) mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n"); - return ret_val; - } return ret_val; } -enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt) +int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt) { u32 ret_val; + if (NULL == mhi_dev_ctxt) return MHI_STATUS_ERROR; mhi_dev_ctxt->mhi_cpu_notifier.notifier_call = mhi_cpu_notifier_cb; @@ -475,6 +651,5 @@ enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt) if (ret_val) return MHI_STATUS_ERROR; else - return MHI_STATUS_SUCCESS; + return 0; } - diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c index da23eaa3279c..5ae858ae7c9d 100644 --- a/drivers/platform/msm/mhi/mhi_isr.c +++ 
b/drivers/platform/msm/mhi/mhi_isr.c @@ -19,6 +19,7 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id) { struct device *mhi_device = dev_id; struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data; + if (!mhi_dev_ctxt) { mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n"); return IRQ_HANDLED; @@ -28,9 +29,9 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id) mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number)); trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number)); - atomic_inc(&mhi_dev_ctxt->flags.events_pending); - wake_up_interruptible( - mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq); + atomic_inc(&mhi_dev_ctxt->flags.events_pending); + wake_up_interruptible( + mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq); return IRQ_HANDLED; } @@ -54,7 +55,7 @@ irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id) if (likely(NULL != client_handle)) { client_handle->result.user_data = client_handle->user_data; -if (likely(NULL != &client_info->mhi_client_cb)) { + if (likely(NULL != client_info->mhi_client_cb)) { cb_info.result = &client_handle->result; cb_info.cb_reason = MHI_CB_XFER; cb_info.chan = client_handle->chan_info.chan_nr; @@ -82,15 +83,18 @@ static enum MHI_STATUS mhi_process_event_ring( &mhi_dev_ctxt->mhi_local_event_ctxt[ev_index]; u32 event_code; - ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index]; + ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index]; + device_rp = (union mhi_event_pkt *)mhi_p2v_addr( - mhi_dev_ctxt, - MHI_RING_TYPE_EVENT_RING, - ev_index, - ev_ctxt->mhi_event_read_ptr); + mhi_dev_ctxt, + MHI_RING_TYPE_EVENT_RING, + ev_index, + ev_ctxt->mhi_event_read_ptr); + local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp; BUG_ON(validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp)); + while ((local_rp != device_rp) && (event_quota > 0) && (device_rp != NULL) && (local_rp != NULL)) { event_to_process = *local_rp; @@ -141,6 +145,7 @@ static enum MHI_STATUS mhi_process_event_ring( case MHI_PKT_TYPE_STATE_CHANGE_EVENT: { enum STATE_TRANSITION new_state; + new_state = MHI_READ_STATE(&event_to_process); mhi_log(MHI_MSG_INFO, "MHI STE received ring 0x%x\n", @@ -151,6 +156,7 @@ static enum MHI_STATUS mhi_process_event_ring( case MHI_PKT_TYPE_EE_EVENT: { enum STATE_TRANSITION new_state; + mhi_log(MHI_MSG_INFO, "MHI EEE received ring 0x%x\n", ev_index); @@ -203,8 +209,7 @@ int parse_event_thread(void *ctxt) u32 i = 0; int ret_val = 0; int ret_val_process_event = 0; - atomic_t *ev_pen_ptr; - ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending; + atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending; /* Go through all event rings */ for (;;) { @@ -221,7 +226,6 @@ int parse_event_thread(void *ctxt) switch (ret_val) { case -ERESTARTSYS: return 0; - break; default: if (mhi_dev_ctxt->flags.kill_threads) { mhi_log(MHI_MSG_INFO, @@ -262,7 +266,7 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle) { enum MHI_STATUS ret_val; - client_handle->result.payload_buf = 0; + client_handle->result.buf_addr = NULL; client_handle->result.bytes_xferd = 0; client_handle->result.transaction_status = 0; ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt, diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h index f2244938c289..8ff25c79601a 100644 --- a/drivers/platform/msm/mhi/mhi_macros.h +++ b/drivers/platform/msm/mhi/mhi_macros.h @@ -13,6 +13,7 @@ #define _H_MHI_MACROS #define MHI_IPC_LOG_PAGES (100) +#define MAX_BOUNCE_BUF_SIZE 0x2000 #define MHI_LOG_SIZE 0x1000
#define MHI_LINK_STABILITY_WAIT_MS 100 #define MHI_DEVICE_WAKE_DBOUNCE_TIMEOUT_MS 10 @@ -51,7 +52,7 @@ #define MHI_M2_DEBOUNCE_TMR_MS 10 #define MHI_XFER_DB_INTERVAL 8 -#define MHI_EV_DB_INTERVAL 32 +#define MHI_EV_DB_INTERVAL 1 #define MHI_DEV_WAKE_DB 127 @@ -79,10 +80,11 @@ #define VALID_CHAN_NR(_CHAN_NR) (IS_HARDWARE_CHANNEL(_CHAN_NR) || \ IS_SOFTWARE_CHANNEL(_CHAN_NR)) -#define VALID_BUF(_BUF_ADDR, _BUF_LEN) \ - (((uintptr_t)(_BUF_ADDR) >= MHI_DATA_SEG_WINDOW_START_ADDR) && \ +#define VALID_BUF(_BUF_ADDR, _BUF_LEN, _MHI_DEV_CTXT) \ + (((uintptr_t)(_BUF_ADDR) >= \ + (_MHI_DEV_CTXT)->dev_space.start_win_addr) && \ (((uintptr_t)(_BUF_ADDR) + (uintptr_t)(_BUF_LEN) < \ - MHI_DATA_SEG_WINDOW_END_ADDR))) + (_MHI_DEV_CTXT)->dev_space.end_win_addr))) #define MHI_HW_INTMOD_VAL_MS 2 /* Timeout Values */ @@ -108,7 +110,7 @@ } #define MHI_TX_TRB_GET_LEN(_FIELD, _PKT) \ (((_PKT)->data_tx_pkt).buf_len & (((MHI_##_FIELD ## __MASK) << \ - MHI_##_FIELD ## __SHIFT))); \ + MHI_##_FIELD ## __SHIFT))) /* MHI Event Ring Elements 7.4.1*/ #define EV_TRB_CODE @@ -216,7 +218,7 @@ (_CTXT)->mhi_intmodt &= (~((MHI_##_FIELD ## __MASK) << \ MHI_##_FIELD ## __SHIFT)); \ (_CTXT)->mhi_intmodt |= new_val; \ -}; +} #define MHI_GET_EV_CTXT(_FIELD, _CTXT) \ (((_CTXT)->mhi_intmodt >> MHI_##_FIELD ## __SHIFT) & \ diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c index 6ba4b859b29b..1b818312a16f 100644 --- a/drivers/platform/msm/mhi/mhi_main.c +++ b/drivers/platform/msm/mhi/mhi_main.c @@ -45,18 +45,27 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt, uintptr_t chan, u64 val) { - wmb(); if (mhi_dev_ctxt->mmio_info.chan_db_addr == io_addr) { - mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]. + mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan]. mhi_trb_write_ptr = val; } else if (mhi_dev_ctxt->mmio_info.event_db_addr == io_addr) { - if (chan < mhi_dev_ctxt->mmio_info.nr_event_rings) - mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[chan]. + if (chan < mhi_dev_ctxt->mmio_info.nr_event_rings) { + mhi_log(MHI_MSG_INFO, + "EV ctxt: %ld val 0x%llx RP: 0x%llx WP: 0x%llx", + chan, val, + mhi_dev_ctxt->dev_space.ring_ctxt. + ec_list[chan].mhi_event_read_ptr, + mhi_dev_ctxt->dev_space.ring_ctxt. + ec_list[chan].mhi_event_write_ptr); + mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[chan].
mhi_event_write_ptr = val; - else + } else { mhi_log(MHI_MSG_ERROR, "Bad EV ring index: %lx\n", chan); + } } + /* Flush ctxt update to main memory for device visibility */ + wmb(); } int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev) @@ -65,11 +74,12 @@ long int sleep_time = 100; struct pci_dev *pcie_device = (struct pci_dev *)mhi_pcie_dev->pcie_device; + do { ret_val = pci_enable_device(mhi_pcie_dev->pcie_device); if (0 != ret_val) { mhi_log(MHI_MSG_ERROR, - "Failed to enable pcie struct device ret_val %d\n", + "Failed to enable pcie device, ret: %d\n", ret_val); mhi_log(MHI_MSG_ERROR, "Sleeping for ~ %li uS, and retrying.\n", sleep_time); @@ -83,18 +93,16 @@ int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev) mhi_pcie_dev->core.bar0_base = ioremap_nocache(pci_resource_start(pcie_device, 0), pci_resource_len(pcie_device, 0)); - if (!mhi_pcie_dev->core.bar0_base) { + if (!mhi_pcie_dev->core.bar0_base) goto mhi_device_list_error; - } mhi_pcie_dev->core.bar0_end = mhi_pcie_dev->core.bar0_base + pci_resource_len(pcie_device, 0); mhi_pcie_dev->core.bar2_base = ioremap_nocache(pci_resource_start(pcie_device, 2), pci_resource_len(pcie_device, 2)); - if (!mhi_pcie_dev->core.bar2_base) { + if (!mhi_pcie_dev->core.bar2_base) goto io_map_err; - } mhi_pcie_dev->core.bar2_end = mhi_pcie_dev->core.bar2_base + pci_resource_len(pcie_device, 2); @@ -166,60 +174,6 @@ int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } -int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev) - { - int ret_val = 0; - struct device *dev = &mhi_pcie_dev->pcie_device->dev; - struct device_node *np; - - np = dev->of_node; - mhi_log(MHI_MSG_VERBOSE, - "Attempting to grab DEVICE_WAKE gpio\n"); - ret_val = of_get_named_gpio(np, "mhi-device-wake-gpio", 0); - switch (ret_val) { - case -EPROBE_DEFER: - mhi_log(MHI_MSG_VERBOSE, "DT is not ready\n"); - return ret_val; - case -ENOENT: - mhi_log(MHI_MSG_ERROR, "Failed to find device wake gpio\n"); - return ret_val; - case 0: - mhi_log(MHI_MSG_CRITICAL, - "Could not get gpio from struct device tree!\n"); - return -EIO; - default: - mhi_pcie_dev->core.device_wake_gpio = ret_val; - mhi_log(MHI_MSG_CRITICAL, - "Got DEVICE_WAKE GPIO nr 0x%x from struct device tree\n", - mhi_pcie_dev->core.device_wake_gpio); - break; - } - - ret_val = gpio_request(mhi_pcie_dev->core.device_wake_gpio, "mhi"); - if (ret_val) { - mhi_log(MHI_MSG_CRITICAL, - "Could not obtain struct device WAKE gpio\n"); - return ret_val; - } - mhi_log(MHI_MSG_VERBOSE, - "Attempting to set output direction to DEVICE_WAKE gpio\n"); - /* This GPIO must never sleep as it can be set in timer ctxt */ - gpio_set_value_cansleep(mhi_pcie_dev->core.device_wake_gpio, 0); - - ret_val = gpio_direction_output(mhi_pcie_dev->core.device_wake_gpio, 1); - - if (ret_val) { - mhi_log(MHI_MSG_VERBOSE, - "Failed to set output direction of DEVICE_WAKE gpio\n"); - goto mhi_gpio_dir_err; - } - return 0; - -mhi_gpio_dir_err: - gpio_free(mhi_pcie_dev->core.device_wake_gpio); - return -EIO; -} - int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan, struct mhi_chan_info *chan_info) { @@ -231,77 +185,13 @@ int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan, mhi_dev_ctxt->dev_info->plat_dev->dev.of_node, dt_prop, (u32 *)chan_info, sizeof(struct mhi_chan_info) / sizeof(u32)); - if (r) - mhi_log(MHI_MSG_VERBOSE, - "Failed to pull chan %d info from DT, %d\n", chan, r); return r; } -int
mhi_release_chan_ctxt(struct mhi_chan_ctxt *cc_list, - struct mhi_ring *ring) -{ - if (cc_list == NULL || ring == NULL) - return -EINVAL; - - dma_free_coherent(NULL, ring->len, ring->base, - cc_list->mhi_trb_ring_base_addr); - mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring, - MHI_CHAN_STATE_DISABLED); - return 0; -} - -void free_tre_ring(struct mhi_client_handle *client_handle) -{ - struct mhi_chan_ctxt *chan_ctxt; - struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt; - int chan = client_handle->chan_info.chan_nr; - int r; - - chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; - r = mhi_release_chan_ctxt(chan_ctxt, - &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]); - if (r) - mhi_log(MHI_MSG_ERROR, - "Failed to release chan %d ret %d\n", chan, r); -} - -static int populate_tre_ring(struct mhi_client_handle *client_handle) -{ - dma_addr_t ring_dma_addr; - void *ring_local_addr; - struct mhi_chan_ctxt *chan_ctxt; - struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt; - u32 chan = client_handle->chan_info.chan_nr; - u32 nr_desc = client_handle->chan_info.max_desc; - - mhi_log(MHI_MSG_INFO, - "Entered chan %d requested desc %d\n", chan, nr_desc); - - chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; - ring_local_addr = dma_alloc_coherent(NULL, - nr_desc * sizeof(union mhi_xfer_pkt), - &ring_dma_addr, GFP_KERNEL); - - if (ring_local_addr == NULL) - return -ENOMEM; - - mhi_init_chan_ctxt(chan_ctxt, ring_dma_addr, - (uintptr_t)ring_local_addr, - nr_desc, - GET_CHAN_PROPS(CHAN_DIR, - client_handle->chan_info.flags), - client_handle->chan_info.ev_ring, - &mhi_dev_ctxt->mhi_local_chan_ctxt[chan], - MHI_CHAN_STATE_ENABLED); - mhi_log(MHI_MSG_INFO, "Exited\n"); - return 0; -} - enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; struct mhi_device_ctxt *mhi_dev_ctxt; - struct mhi_control_seg *mhi_ctrl_seg = NULL; int r = 0; int chan; @@ -328,22 +218,15 @@ enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle) return MHI_STATUS_DEVICE_NOT_READY; } - mhi_ctrl_seg = client_handle->mhi_dev_ctxt->mhi_ctrl_seg; - - r = populate_tre_ring(client_handle); - if (r) { - mhi_log(MHI_MSG_ERROR, - "Failed to initialize tre ring chan %d ret %d\n", - chan, r); - return MHI_STATUS_ERROR; - } - client_handle->event_ring_index = - mhi_ctrl_seg->mhi_cc_list[chan].mhi_event_ring_index; + mhi_dev_ctxt->dev_space.ring_ctxt. 
+ cc_list[chan].mhi_event_ring_index; + client_handle->msi_vec = - mhi_ctrl_seg->mhi_ec_list[ + mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ client_handle->event_ring_index].mhi_msi_vector; - client_handle->intmod_t = mhi_ctrl_seg->mhi_ec_list[ + client_handle->intmod_t = + mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ client_handle->event_ring_index].mhi_intmodt; init_completion(&client_handle->chan_open_complete); @@ -450,7 +333,6 @@ void mhi_close_channel(struct mhi_client_handle *client_handle) } mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan); - free_tre_ring(client_handle); mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan); client_handle->chan_status = 0; } @@ -466,23 +348,15 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt, db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING, chan, (uintptr_t) chan_ctxt->wp); mhi_dev_ctxt->mhi_chan_db_order[chan]++; - if (IS_HARDWARE_CHANNEL(chan) && chan_ctxt->dir == MHI_IN) { - if ((mhi_dev_ctxt->counters.chan_pkts_xferd[chan] % - MHI_XFER_DB_INTERVAL) == 0) - mhi_process_db(mhi_dev_ctxt, - mhi_dev_ctxt->mmio_info.chan_db_addr, - chan, db_value); - } else { mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr, chan, db_value); - } } enum MHI_STATUS mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; - mhi_log(MHI_MSG_VERBOSE, "state = %d\n", mhi_dev_ctxt->mhi_state); + if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) { mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n", (atomic_read(&mhi_dev_ctxt->flags.m2_transition))); @@ -496,7 +370,7 @@ enum MHI_STATUS mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt) ret_val = MHI_STATUS_CHAN_NOT_READY; } } else { - ret_val = MHI_STATUS_SUCCESS; + ret_val = MHI_STATUS_SUCCESS; } return ret_val; @@ -511,9 +385,9 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt unsigned long flags = 0; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; u64 db_value = 0; - chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; + + chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan]; mhi_dev_ctxt->counters.m1_m0++; - mhi_log(MHI_MSG_VERBOSE, "Entered"); if (type == MHI_RING_TYPE_CMD_RING) atomic_inc(&mhi_dev_ctxt->counters.outbound_acks); @@ -562,8 +436,117 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt } return ret_val; } +static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt, + int chan, void *buf, size_t buf_len, + enum dma_data_direction dir, struct mhi_buf_info **bb) +{ -enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle, + struct mhi_ring *bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan]; + struct mhi_buf_info *bb_info; + int r; + uintptr_t bb_index, ctxt_index_wp, ctxt_index_rp; + + mhi_log(MHI_MSG_RAW, "Entered chan %d\n", chan); + get_element_index(bb_ctxt, bb_ctxt->wp, &bb_index); + get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp, + &ctxt_index_wp); + get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp, + &ctxt_index_rp); + BUG_ON(bb_index != ctxt_index_wp); + mhi_log(MHI_MSG_VERBOSE, + "Chan RP index %ld Chan WP index %ld, chan %d\n", + ctxt_index_rp, ctxt_index_wp, chan); + r = ctxt_add_element(bb_ctxt, (void **)&bb_info); + if (r) + return r; + + bb_info->buf_len = buf_len; + bb_info->client_buf = buf; + bb_info->dir = dir; + bb_info->bb_p_addr = dma_map_single( + &mhi_dev_ctxt->dev_info->plat_dev->dev, + 
bb_info->client_buf,
+			bb_info->buf_len,
+			bb_info->dir);
+	if (!VALID_BUF(bb_info->bb_p_addr, bb_info->buf_len, mhi_dev_ctxt)) {
+		mhi_log(MHI_MSG_INFO,
+			"Buffer outside DMA range 0x%lx, size 0x%zx\n",
+			(uintptr_t)bb_info->bb_p_addr, buf_len);
+		dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+				 bb_info->bb_p_addr,
+				 bb_info->buf_len,
+				 bb_info->dir);
+		mhi_log(MHI_MSG_RAW, "Allocating BB, chan %d\n", chan);
+		bb_info->bb_v_addr = dma_alloc_coherent(
+				&mhi_dev_ctxt->dev_info->plat_dev->dev,
+				bb_info->buf_len,
+				&bb_info->bb_p_addr,
+				GFP_ATOMIC);
+		if (!bb_info->bb_v_addr)
+			return -ENOMEM;
+		mhi_dev_ctxt->counters.bb_used[chan]++;
+		if (dir == DMA_TO_DEVICE) {
+			mhi_log(MHI_MSG_INFO, "Copying client buf into BB.\n");
+			memcpy(bb_info->bb_v_addr, buf, bb_info->buf_len);
+			/* Flush data out to the bounce buffer */
+			wmb();
+		}
+		bb_info->bb_active = 1;
+	}
+	*bb = bb_info;
+	mhi_log(MHI_MSG_RAW, "Exited chan %d\n", chan);
+	return 0;
+}
+
+static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
+			       struct mhi_buf_info *bb)
+{
+	mhi_log(MHI_MSG_RAW, "Entered\n");
+	if (!bb->bb_active)
+		/* This buffer was mapped directly to the device */
+		dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+				 bb->bb_p_addr, bb->buf_len, bb->dir);
+	else
+		/* This buffer was bounced */
+		dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+				  bb->buf_len,
+				  bb->bb_v_addr,
+				  bb->bb_p_addr);
+	bb->bb_active = 0;
+	mhi_log(MHI_MSG_RAW, "Exited\n");
+}
+
+void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+		   struct mhi_ring *bb_ctxt)
+{
+	int r = 0;
+	struct mhi_buf_info *bb = NULL;
+
+	mhi_log(MHI_MSG_VERBOSE, "Entered\n");
+	/*
+	 * Assumption: no events are expected during or after
+	 * this operation occurs for this channel.
+	 * If a bounce buffer was allocated, the coherent memory is
+	 * expected to be already freed.
+	 * If the user's buffer was mapped, it is expected to be
+	 * already unmapped.
+	 * Failure of any of the above conditions will result in
+	 * a memory leak or subtle memory corruption.
+	 */
+	while (!r) {
+		r = ctxt_del_element(bb_ctxt, (void **)&bb);
+		if (bb)
+			free_bounce_buffer(mhi_dev_ctxt, bb);
+	}
+	bb_ctxt->ack_rp = bb_ctxt->base;
+	bb_ctxt->rp = bb_ctxt->base;
+	bb_ctxt->wp = bb_ctxt->base;
+	mhi_log(MHI_MSG_VERBOSE, "Exited\n");
+}
+
+static enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
 		dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
 {
 	union mhi_xfer_pkt *pkt_loc;
@@ -576,9 +559,9 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
 		mhi_log(MHI_MSG_CRITICAL, "Bad input args\n");
 		return MHI_STATUS_ERROR;
 	}
-	MHI_ASSERT(VALID_BUF(buf, buf_len),
-		"Client buffer is of invalid length\n");
 	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+	MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
+		"Client buffer is of invalid length\n");
 	chan = client_handle->chan_info.chan_nr;
 
 	pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);
@@ -628,6 +611,48 @@ error:
 }
-EXPORT_SYMBOL(mhi_queue_xfer);
+
+int mhi_queue_virt_xfer(struct mhi_client_handle *client_handle,
+		void *buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
+{
+	int r;
+	enum dma_data_direction dma_dir;
+	struct mhi_buf_info *bb;
+
+	if (!client_handle || !buf || !buf_len)
+		return -EINVAL;
+
+	if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
+		dma_dir = DMA_TO_DEVICE;
+	else
+		dma_dir = DMA_FROM_DEVICE;
+
+	r = create_bb(client_handle->mhi_dev_ctxt,
+		      client_handle->chan_info.chan_nr,
+		      buf, buf_len, dma_dir, &bb);
+	if (r) {
+		mhi_log(MHI_MSG_VERBOSE,
+			"Failed to create BB, chan %d ret %d\n",
+			client_handle->chan_info.chan_nr, r);
+		return r;
+	}
+
+	mhi_log(MHI_MSG_VERBOSE,
+		"Queueing to HW: Client Buf 0x%p, size 0x%zx, DMA %llx, chan %d\n",
+		buf, buf_len, (u64)bb->bb_p_addr,
+		client_handle->chan_info.chan_nr);
+	r = mhi_queue_xfer(client_handle,
+			   bb->bb_p_addr,
+			   bb->buf_len,
+			   mhi_flags);
+
+	/* Assumption: if create_bb did not fail, we do not expect
+	 * mhi_queue_xfer to fail; if it does, the bb list will be
+	 * out of sync with the descriptor list, which is problematic.
+	 */
+	BUG_ON(r);
+	return r;
+}
+
 enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 			enum MHI_COMMAND cmd, u32 chan)
 {
@@ -665,7 +690,7 @@ enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
 	atomic_inc(&mhi_dev_ctxt->flags.data_pending);
 	from_state =
-		mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state;
+		mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state;
 
 	switch (cmd) {
 		break;
@@ -682,8 +707,7 @@
 		break;
 	default:
 		mhi_log(MHI_MSG_ERROR,
-			"Invalid state transition for "
-			"cmd 0x%x, from_state 0x%x\n",
+			"Invalid state transition: cmd 0x%x, from_state 0x%x\n",
 			cmd, from_state);
 		ret_val = MHI_STATUS_BAD_STATE;
 		goto error_invalid;
@@ -725,6 +749,62 @@ error_invalid:
 	return ret_val;
 }
 
+static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
+			     struct mhi_ring *bb_ctxt,
+			     struct mhi_result *result,
+			     size_t bounced_data_size)
+{
+	struct mhi_buf_info *bb;
+
+	mhi_log(MHI_MSG_INFO, "Entered\n");
+	bb = bb_ctxt->rp;
+	bb->filled_size = bounced_data_size;
+
+	/* Otherwise data corruption will occur */
+	BUG_ON(bb->dir != DMA_FROM_DEVICE);
+	BUG_ON(bb->filled_size > bb->buf_len);
+
+	if (bb->bb_active) {
+		/* This is coherent memory, no cache management is needed */
+		memcpy(bb->client_buf, bb->bb_v_addr, bb->filled_size);
+		mhi_log(MHI_MSG_RAW,
+			"Bounce from BB: 0x%p to Client Buf: 0x%p Len 0x%zx\n",
+			bb->bb_v_addr, bb->client_buf, bb->filled_size);
+	}
+
+	result->buf_addr = bb->client_buf;
+	result->bytes_xferd = bb->filled_size;
+	result->transaction_status = 0;
+
+	/* At this point the bounce buffer is no longer necessary.
+	 * Whatever was received from the device was copied back to the
+	 * user buffer. Free up the bounce buffer, but do not move the bb ring
+	 * rp, since it can be moved asynchronously by mhi_poll_inbound.
+	 */
+	free_bounce_buffer(mhi_dev_ctxt, bb);
+	mhi_log(MHI_MSG_INFO, "Exited\n");
+}
+
+static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
+			      struct mhi_ring *bb_ctxt,
+			      struct mhi_result *result,
+			      size_t bounced_data_size)
+{
+	struct mhi_buf_info *bb;
+
+	bb = bb_ctxt->rp;
+	mhi_log(MHI_MSG_RAW, "Entered\n");
+	BUG_ON(bb->dir != DMA_TO_DEVICE);
+	bb->filled_size = bounced_data_size;
+	BUG_ON(bb->filled_size != bb->buf_len);
+	result->buf_addr = bb->client_buf;
+	result->bytes_xferd = bb->filled_size;
+	result->transaction_status = 0;
+	free_bounce_buffer(mhi_dev_ctxt, bb);
+	mhi_log(MHI_MSG_RAW, "Exited\n");
+}
+
 static enum MHI_STATUS parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 		u32 chan, union mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len)
 {
@@ -733,6 +813,8 @@ static enum MHI_STATUS parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 	struct mhi_client_handle *client_handle = NULL;
 	struct mhi_ring *local_chan_ctxt = NULL;
 	struct mhi_cb_info cb_info;
+	struct mhi_ring *bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
+
 	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 	client_handle = mhi_dev_ctxt->client_handle_list[chan];
@@ -740,20 +822,28 @@
 	MHI_ASSERT(!unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
 			mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp),
 			"Empty Event Ring\n");
+	parse_outbound_bb(mhi_dev_ctxt, bb_ctxt,
+			  &client_handle->result, xfer_len);
+
+	mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
+	atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+	ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+				   NULL);
+	BUG_ON(ret_val);
+	ret_val = ctxt_del_element(bb_ctxt, NULL);
+	BUG_ON(ret_val);
 	if (NULL != client_handle) {
 		result = &mhi_dev_ctxt->client_handle_list[chan]->result;
-		if (NULL != (&client_handle->client_info.mhi_client_cb)) {
+		client_handle->result.user_data =
+						client_handle->user_data;
+		if (NULL != client_handle->client_info.mhi_client_cb) {
 			cb_info.cb_reason = MHI_CB_XFER;
 			cb_info.result = &client_handle->result;
 			cb_info.chan = chan;
 			client_handle->client_info.mhi_client_cb(&cb_info);
 		}
 	}
-	ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
-			NULL);
-	atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
-	mhi_log(MHI_MSG_VERBOSE,
+	mhi_log(MHI_MSG_RAW,
 		"Processed outbound ack chan %d Pending acks %d.\n",
 		chan, atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
 	return MHI_STATUS_SUCCESS;
@@ -766,6 +856,9 @@ static enum MHI_STATUS parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
 	struct mhi_ring *local_chan_ctxt;
 	struct mhi_result *result;
 	struct mhi_cb_info cb_info;
+	struct mhi_ring *bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
+	int r;
+	uintptr_t bb_index, ctxt_index_rp, ctxt_index_wp;
 
 	client_handle = mhi_dev_ctxt->client_handle_list[chan];
 	local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
@@ -776,13 +869,29 @@
 	if (NULL != mhi_dev_ctxt->client_handle_list[chan])
 		result = &mhi_dev_ctxt->client_handle_list[chan]->result;
 
-	/* If a client is registered */
+	parse_inbound_bb(mhi_dev_ctxt, bb_ctxt,
+			 &client_handle->result, xfer_len);
+
 	if (unlikely(IS_SOFTWARE_CHANNEL(chan))) {
-		MHI_TX_TRB_SET_LEN(TX_TRB_LEN,
-				local_ev_trb_loc,
-				xfer_len);
-		ctxt_del_element(local_chan_ctxt, NULL);
+		MHI_TX_TRB_SET_LEN(TX_TRB_LEN, local_ev_trb_loc, xfer_len);
+		r = ctxt_del_element(local_chan_ctxt, NULL);
+		BUG_ON(r);
+		r = ctxt_del_element(bb_ctxt, NULL);
+		BUG_ON(r);
+		get_element_index(bb_ctxt, bb_ctxt->rp, &bb_index);
+		get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+				  mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp,
+				  &ctxt_index_rp);
+		get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+				  mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
+				  &ctxt_index_wp);
+		mhi_log(MHI_MSG_VERBOSE,
+			"Chan RP index %ld Chan WP index %ld chan %d\n",
+			ctxt_index_rp, ctxt_index_wp, chan);
+		BUG_ON(bb_index != ctxt_index_rp);
 		if (NULL != client_handle->client_info.mhi_client_cb) {
+			client_handle->result.user_data =
+						client_handle->user_data;
 			cb_info.cb_reason = MHI_CB_XFER;
 			cb_info.result = &client_handle->result;
 			cb_info.chan = chan;
@@ -792,17 +901,38 @@
 				"No client registered chan %d\n", chan);
 		}
 	} else {
-		/* IN Hardware channel with no client
-		 * registered, we are done with this TRB*/
-		if (likely(NULL != client_handle)) {
+		if (likely(client_handle)) {
+			/* Move the rp for both the descriptor and
+			 * the bb rings. The caller will get all the buffer
+			 * references in the result structure. We do not need
+			 * to keep further track of the user buffer.
+			 */
+			ctxt_del_element(bb_ctxt, NULL);
 			ctxt_del_element(local_chan_ctxt, NULL);
-		/* A client is not registred for this IN channel */
-		} else {/* Hardware Channel, no client registerered,
+			get_element_index(bb_ctxt, bb_ctxt->rp, &bb_index);
+			get_element_index(
+				&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+				mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp,
+				&ctxt_index_rp);
+			get_element_index(
+				&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+				mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
+				&ctxt_index_wp);
+			mhi_log(MHI_MSG_VERBOSE,
+				"Chan RP index %ld Chan WP index %ld chan %d\n",
+				ctxt_index_rp, ctxt_index_wp, chan);
+			BUG_ON(bb_index != ctxt_index_rp);
+		} else {
+			/* Hardware channel, no client registered,
 				drop data */
 			recycle_trb_and_ring(mhi_dev_ctxt,
 				&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
 				MHI_RING_TYPE_XFER_RING,
 				chan);
+			/* No bounce buffer to recycle as no user request
+			 * can be present.
+			 */
+			BUG();
 		}
 	}
 	return MHI_STATUS_SUCCESS;
@@ -817,6 +947,17 @@ static enum MHI_STATUS validate_xfer_el_addr(struct mhi_chan_ctxt *ring,
 		MHI_STATUS_ERROR : MHI_STATUS_SUCCESS;
 }
 
+static void print_tre(int chan, struct mhi_ring *ring, struct mhi_tx_pkt *tre)
+{
+	uintptr_t el_index;
+
+	get_element_index(ring, tre, &el_index);
+	mhi_log(MHI_MSG_ERROR, "Printing TRE 0x%p index %lx for channel %d:\n",
+		tre, el_index, chan);
+	mhi_log(MHI_MSG_ERROR, "Buffer Pointer 0x%llx, len 0x%x, info 0x%x\n",
+		tre->buffer_ptr, tre->buf_len, tre->info);
+}
+
 enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
 		union mhi_event_pkt *event, u32 event_id)
 {
@@ -865,21 +1006,13 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
 			&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 		phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
 
-		chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
-		ret_val = validate_xfer_el_addr(chan_ctxt,
-						phy_ev_trb_loc);
-
-		if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
-			mhi_log(MHI_MSG_ERROR, "Bad event trb ptr.\n");
-			break;
-		}
+		chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+		BUG_ON(validate_xfer_el_addr(chan_ctxt, phy_ev_trb_loc));
 
 		/* Get the TRB this event points to */
 		local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt,
 					MHI_RING_TYPE_EVENT_RING,
 					event_id, phy_ev_trb_loc);
-		mhi_log(MHI_MSG_CRITICAL, "mhi_p2v_addr = %p\n",
-			local_ev_trb_loc);
 		local_trb_loc = (union mhi_xfer_pkt *)local_chan_ctxt->rp;
 
 		trace_mhi_tre(local_trb_loc, chan, 1);
@@ -896,6 +1029,7 @@
 		}
 		do {
 			u64 phy_buf_loc;
+
 			MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc, ieot_flag);
 			phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr;
 			trb_data_loc = (dma_addr_t)phy_buf_loc;
@@ -905,18 +1039,12 @@
 			xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN,
 						local_trb_loc);
 
-			if (!VALID_BUF(trb_data_loc, xfer_len)) {
+			if (!VALID_BUF(trb_data_loc, xfer_len, mhi_dev_ctxt)) {
 				mhi_log(MHI_MSG_CRITICAL,
 					"Bad buffer ptr: %lx.\n",
 					(uintptr_t)trb_data_loc);
 				return MHI_STATUS_ERROR;
 			}
-
-			if (NULL != client_handle) {
-				result->payload_buf = trb_data_loc;
-				result->bytes_xferd = xfer_len;
-				result->user_data = client_handle->user_data;
-			}
 			if (local_chan_ctxt->dir == MHI_IN) {
 				parse_inbound(mhi_dev_ctxt, chan,
 						local_ev_trb_loc, xfer_len);
@@ -926,15 +1054,14 @@
 			}
 			mhi_dev_ctxt->counters.chan_pkts_xferd[chan]++;
 			if (local_trb_loc ==
-				(union mhi_xfer_pkt *)local_chan_ctxt->rp) {
+			    (union mhi_xfer_pkt *)local_chan_ctxt->rp) {
mhi_log(MHI_MSG_CRITICAL, "Done. Processed until: %lx.\n", (uintptr_t)trb_data_loc); break; } else { local_trb_loc = - (union mhi_xfer_pkt *)local_chan_ctxt-> - rp; + (union mhi_xfer_pkt *)local_chan_ctxt->rp; } i++; } while (i < nr_trb_to_parse); @@ -945,6 +1072,7 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt, { struct mhi_ring *chan_ctxt = NULL; u64 db_value = 0; + mhi_dev_ctxt->flags.uldl_enabled = 1; chan = MHI_EV_READ_CHID(EV_CHID, event); mhi_dev_ctxt->flags.db_mode[chan] = 1; @@ -966,9 +1094,22 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt, } break; } + case MHI_EVENT_CC_BAD_TRE: + phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event); + local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt, + MHI_RING_TYPE_EVENT_RING, event_id, + phy_ev_trb_loc); + mhi_log(MHI_MSG_ERROR, + "Received BAD TRE event for ring %d, pointer 0x%p\n", + chan, local_ev_trb_loc); + print_tre(chan, &mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + (struct mhi_tx_pkt *)local_ev_trb_loc); + BUG(); + break; default: mhi_log(MHI_MSG_ERROR, "Unknown TX completion.\n"); + break; } /*switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ return 0; @@ -1004,9 +1145,7 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, (union mhi_xfer_pkt *)added_element; added_xfer_pkt->data_tx_pkt = *(struct mhi_tx_pkt *)removed_xfer_pkt; - } else if (MHI_RING_TYPE_EVENT_RING == ring_type && - mhi_dev_ctxt->counters.m0_m3 > 0 && - IS_HARDWARE_CHANNEL(ring_index)) { + } else if (MHI_RING_TYPE_EVENT_RING == ring_type) { spinlock_t *lock; unsigned long flags; @@ -1017,6 +1156,8 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, spin_lock_irqsave(lock, flags); db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index, (uintptr_t) ring->wp); + mhi_log(MHI_MSG_INFO, + "Updating ctxt, ring index %d\n", ring_index); mhi_update_ctxt(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr, ring_index, db_value); @@ -1033,6 +1174,7 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, case MHI_RING_TYPE_CMD_RING: { struct mutex *cmd_mutex = NULL; + cmd_mutex = &mhi_dev_ctxt-> mhi_cmd_mutex_list[PRIMARY_CMD_RING]; @@ -1048,6 +1190,7 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, { spinlock_t *lock = NULL; unsigned long flags = 0; + lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index]; spin_lock_irqsave(lock, flags); mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1; @@ -1066,6 +1209,7 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, case MHI_RING_TYPE_XFER_RING: { unsigned long flags = 0; + spin_lock_irqsave( &mhi_dev_ctxt->db_write_lock[ring_index], flags); @@ -1096,7 +1240,8 @@ static enum MHI_STATUS reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, struct mhi_client_handle *client_handle = NULL; struct mutex *chan_mutex; int pending_el = 0; - struct mhi_ring *ring; + struct mhi_ring *bb_ctxt; + MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan); @@ -1106,20 +1251,24 @@ static enum MHI_STATUS reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, return MHI_STATUS_ERROR; } + bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan]; chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan]; mutex_lock(chan_mutex); client_handle = mhi_dev_ctxt->client_handle_list[chan]; local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; - chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; + chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan]; mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n"); /* * If outbound 
elements are pending, they must be cleared since * they will never be acked after a channel reset. */ - ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; - if (ring->dir == MHI_OUT) - get_nr_enclosed_el(ring, ring->rp, ring->wp, &pending_el); + local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + if (local_chan_ctxt->dir == MHI_OUT) + get_nr_enclosed_el(local_chan_ctxt, + local_chan_ctxt->rp, + local_chan_ctxt->wp, + &pending_el); mhi_log(MHI_MSG_INFO, "Decrementing chan %d out acks by %d.\n", chan, pending_el); @@ -1135,6 +1284,9 @@ static enum MHI_STATUS reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr; chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr; + mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n"); + reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt); + mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING; mutex_unlock(chan_mutex); mhi_log(MHI_MSG_INFO, "Reset complete.\n"); @@ -1147,6 +1299,7 @@ static enum MHI_STATUS start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, union mhi_cmd_pkt *cmd_pkt) { u32 chan; + MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan); if (!VALID_CHAN_NR(chan)) mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan); @@ -1164,6 +1317,7 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_index) { uintptr_t phy_trb_loc = 0; + if (NULL != ev_pkt) phy_trb_loc = (uintptr_t)MHI_EV_READ_PTR(EV_PTR, ev_pkt); @@ -1172,7 +1326,6 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt, *cmd_pkt = (union mhi_cmd_pkt *)mhi_p2v_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING, event_index, phy_trb_loc); - mhi_log(MHI_MSG_INFO, "mhi_p2v_addr %p\n", (void *)(*cmd_pkt)); return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt); } @@ -1181,12 +1334,15 @@ enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt, { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; union mhi_cmd_pkt *cmd_pkt = NULL; + u32 event_code; + event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index); switch (event_code) { case MHI_EVENT_CC_SUCCESS: { u32 chan; + MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan); switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) { @@ -1221,7 +1377,6 @@ enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt, } mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING; atomic_dec(&mhi_dev_ctxt->counters.outbound_acks); - BUG_ON(atomic_read(&mhi_dev_ctxt->counters.outbound_acks) >= 0); break; } default: @@ -1240,59 +1395,69 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle, u32 chan = 0; struct mhi_ring *local_chan_ctxt; struct mutex *chan_mutex = NULL; - int ret_val = 0; + int r = 0; + struct mhi_ring *bb_ctxt; + struct mhi_buf_info *bb; - if (NULL == client_handle || NULL == result || - NULL == client_handle->mhi_dev_ctxt) + if (!client_handle || !result || !client_handle->mhi_dev_ctxt) return -EINVAL; + + mhi_log(MHI_MSG_VERBOSE, "Entered\n"); + mhi_dev_ctxt = client_handle->mhi_dev_ctxt; chan = client_handle->chan_info.chan_nr; local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan]; + bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan]; + mutex_lock(chan_mutex); - if ((local_chan_ctxt->rp != local_chan_ctxt->ack_rp)) { + if (local_chan_ctxt->rp != local_chan_ctxt->ack_rp) { pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp); - result->payload_buf = pending_trb->buffer_ptr; - result->bytes_xferd = MHI_TX_TRB_GET_LEN(TX_TRB_LEN, - (union mhi_xfer_pkt 
*)pending_trb);
 		result->flags = pending_trb->info;
-		ret_val = delete_element(local_chan_ctxt,
+		bb = bb_ctxt->ack_rp;
+		if (bb->bb_active) {
+			mhi_log(MHI_MSG_VERBOSE,
+				"Bounce buffer was active for chan %d\n",
+				chan);
+		}
+		result->buf_addr = bb->client_buf;
+		result->bytes_xferd = bb->filled_size;
+		result->transaction_status = 0;
+		r = delete_element(local_chan_ctxt,
 				&local_chan_ctxt->ack_rp,
 				&local_chan_ctxt->rp, NULL);
-		if (ret_val != MHI_STATUS_SUCCESS) {
-			mhi_log(
-			MHI_MSG_ERROR,
-			"Internal Failure,inconsistent ring,ret %d chan %d\n",
-				ret_val, chan);
-			result->payload_buf = 0;
-			result->bytes_xferd = 0;
-			result->transaction_status = MHI_STATUS_ERROR;
-		}
+		BUG_ON(r);
+		r = delete_element(bb_ctxt,
+				&bb_ctxt->ack_rp,
+				&bb_ctxt->rp, NULL);
+		BUG_ON(r);
 	} else {
-		result->payload_buf = 0;
+		result->buf_addr = 0;
 		result->bytes_xferd = 0;
-		ret_val = MHI_STATUS_RING_EMPTY;
+		r = -ENODATA;
 	}
 	mutex_unlock(chan_mutex);
-	return ret_val;
+	mhi_log(MHI_MSG_VERBOSE,
+		"Exited Result: Buf addr: 0x%p Bytes xferd 0x%zx chan %d\n",
+		result->buf_addr, result->bytes_xferd, chan);
+	return r;
 }
 EXPORT_SYMBOL(mhi_poll_inbound);
-
-enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr)
+int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr)
 {
 	return (addr < (uintptr_t)(ring->base) ||
 			addr > ((uintptr_t)(ring->base)
 				+ (ring->len - 1))) ?
-		MHI_STATUS_ERROR : MHI_STATUS_SUCCESS;
+		-ERANGE : 0;
 }
 
-enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr)
+int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr)
 {
 	return (addr < (uintptr_t)(ring->base) ||
 			addr > ((uintptr_t)(ring->base)
 				+ (ring->len - 1))) ?
-		MHI_STATUS_ERROR : MHI_STATUS_SUCCESS;
+		-ERANGE : 0;
 }
 
 enum MHI_STATUS mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -1382,6 +1547,7 @@ enum MHI_STATUS mhi_deregister_channel(struct mhi_client_handle *client_handle)
 {
 	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
 	int chan;
+
 	if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
 		return MHI_STATUS_ERROR;
 	chan = client_handle->chan_info.chan_nr;
@@ -1409,6 +1575,11 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
 				!mhi_dev_ctxt->flags.db_mode[chan])) {
 			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
 			mhi_dev_ctxt->flags.db_mode[chan] = 0;
+		} else {
+			mhi_log(MHI_MSG_INFO,
+				"Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
+				chan, mhi_dev_ctxt->flags.uldl_enabled,
+				mhi_dev_ctxt->flags.db_mode[chan]);
 		}
 	/* Event Doorbell and Polling mode Disabled */
 	} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
@@ -1430,6 +1601,7 @@ void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
 			u32 mask, u32 shift, u32 val)
 {
 	u32 reg_val;
+
 	reg_val = mhi_reg_read(io_addr, io_offset);
 	reg_val &= ~mask;
 	reg_val = reg_val | (val << shift);
@@ -1440,9 +1612,11 @@ void mhi_reg_write(struct mhi_device_ctxt *mhi_dev_ctxt,
 		void __iomem *io_addr,
 		uintptr_t io_offset, u32 val)
 {
-	mhi_log(MHI_MSG_VERBOSE, "d.s 0x%p off: 0x%lx 0x%x\n",
+	mhi_log(MHI_MSG_RAW, "d.s 0x%p off: 0x%lx 0x%x\n",
 		io_addr, io_offset, val);
 	iowrite32(val, io_addr + io_offset);
+
+	/* Flush write to device */
 	wmb();
 }
 
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index 378081d0faaf..1d780c9863d5 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -17,6 +17,7 @@ enum MHI_STATUS mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
 	u32 pcie_word_val = 0;
 	u32 expiry_counter;
+
mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n"); pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS); @@ -46,6 +47,7 @@ enum MHI_STATUS mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt) { u32 pcie_word_val = 0; u32 expiry_counter; + mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n"); /* Read MMIO and poll for READY bit to be set */ @@ -111,7 +113,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) /* Enable the channels */ for (i = 0; i < MHI_MAX_CHANNELS; ++i) { struct mhi_chan_ctxt *chan_ctxt = - &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]; + &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i]; if (VALID_CHAN_NR(i)) chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED; else @@ -144,9 +146,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) MHICFG_NER_MASK, MHICFG_NER_SHIFT, mhi_dev_ctxt->mmio_info.nr_event_rings); - pcie_dword_val = ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list - - mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+ - mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned; + pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, CCABAP_HIGHER, @@ -161,7 +161,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) pcie_word_val); /* Write the Event Context Base Address Register High and Low parts */ - pcie_dword_val = mhi_dev_ctxt->mmio_info.dma_ev_ctxt; + pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, ECABAP_HIGHER, @@ -174,12 +174,8 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) ECABAP_LOWER_ECABAP_LOWER_MASK, ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val); - /* Write the Command Ring Control Register High and Low parts */ - pcie_dword_val = - ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list - - mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+ - mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned; + pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cmd_ctxt; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, @@ -196,45 +192,14 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) mhi_dev_ctxt->mmio_info.cmd_db_addr = mhi_dev_ctxt->mmio_info.mmio_addr + CRDB_LOWER; - /* Set the control segment in the MMIO */ - pcie_dword_val = ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg - - mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+ - mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned; + /* Set the control and data segments device MMIO */ + pcie_dword_val = mhi_dev_ctxt->dev_space.start_win_addr; pcie_word_val = HIGH_WORD(pcie_dword_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_HIGHER, MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK, MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT, - 0); - - pcie_word_val = LOW_WORD(pcie_dword_val); - mhi_reg_write_field(mhi_dev_ctxt, - mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_LOWER, - MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK, - MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT, - 0); - - pcie_dword_val = (((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg - - mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned) + - mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned) + - mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1; - - pcie_word_val = HIGH_WORD(pcie_dword_val); - 
mhi_reg_write_field(mhi_dev_ctxt, - mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_HIGHER, - MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK, - MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT, - 0); - pcie_word_val = LOW_WORD(pcie_dword_val); - mhi_reg_write_field(mhi_dev_ctxt, - mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_LOWER, - MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK, - MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT, - MHI_DATA_SEG_WINDOW_END_ADDR); - - /* Set the data segment in the MMIO */ - pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR; - pcie_word_val = HIGH_WORD(pcie_dword_val); + pcie_word_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_HIGHER, MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK, @@ -242,28 +207,44 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) pcie_word_val); pcie_word_val = LOW_WORD(pcie_dword_val); + mhi_reg_write_field(mhi_dev_ctxt, + mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_LOWER, + MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK, + MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT, + pcie_word_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_LOWER, MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK, MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT, pcie_word_val); - pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR; + pcie_dword_val = mhi_dev_ctxt->dev_space.end_win_addr; pcie_word_val = HIGH_WORD(pcie_dword_val); + mhi_reg_write_field(mhi_dev_ctxt, + mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_HIGHER, + MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK, + MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT, + pcie_word_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATALIMIT_HIGHER, MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK, MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT, pcie_word_val); + pcie_word_val = LOW_WORD(pcie_dword_val); + + mhi_reg_write_field(mhi_dev_ctxt, + mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_LOWER, + MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK, + MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT, + pcie_word_val); mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATALIMIT_LOWER, MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK, MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT, pcie_word_val); - mhi_log(MHI_MSG_INFO, "Done..\n"); return MHI_STATUS_SUCCESS; } diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c index 5866e8aa8cdb..dbdefe87fa5d 100644 --- a/drivers/platform/msm/mhi/mhi_pm.c +++ b/drivers/platform/msm/mhi/mhi_pm.c @@ -64,6 +64,7 @@ int mhi_runtime_suspend(struct device *dev) { int r = 0; struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data; + mhi_log(MHI_MSG_INFO, "Runtime Suspend - Entered\n"); r = mhi_initiate_m3(mhi_dev_ctxt); pm_runtime_mark_last_busy(dev); @@ -75,6 +76,7 @@ int mhi_runtime_resume(struct device *dev) { int r = 0; struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data; + mhi_log(MHI_MSG_INFO, "Runtime Resume - Entered\n"); r = mhi_initiate_m0(mhi_dev_ctxt); pm_runtime_mark_last_busy(dev); @@ -86,6 +88,7 @@ int mhi_pci_resume(struct pci_dev *pcie_dev) { int r = 0; struct mhi_device_ctxt *mhi_dev_ctxt = pcie_dev->dev.platform_data; + r = mhi_initiate_m0(mhi_dev_ctxt); if (r) goto exit; @@ -149,6 +152,7 @@ ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr, struct mhi_device_ctxt *mhi_dev_ctxt = &mhi_devices.device_list[0].mhi_ctxt; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n"); ret_val = 
mhi_trigger_reset(mhi_dev_ctxt); if (ret_val != MHI_STATUS_SUCCESS) @@ -182,6 +186,7 @@ enum MHI_STATUS mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt) int r; struct pci_dev *pcie_dev; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_INFO, "Entered...\n"); pcie_dev = mhi_dev_ctxt->dev_info->pcie_device; mutex_lock(&mhi_dev_ctxt->mhi_link_state); @@ -223,6 +228,7 @@ enum MHI_STATUS mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt) int r = 0; struct pci_dev *pcie_dev; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + pcie_dev = mhi_dev_ctxt->dev_info->pcie_device; mutex_lock(&mhi_dev_ctxt->mhi_link_state); diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c index 8b9e1a2931ce..a27049a093eb 100644 --- a/drivers/platform/msm/mhi/mhi_ring_ops.c +++ b/drivers/platform/msm/mhi/mhi_ring_ops.c @@ -13,25 +13,26 @@ #include "mhi_sys.h" #include "mhi.h" -static enum MHI_STATUS add_element(struct mhi_ring *ring, void **rp, +static int add_element(struct mhi_ring *ring, void **rp, void **wp, void **assigned_addr) { uintptr_t d_wp = 0, d_rp = 0, ring_size = 0; + int r; if (0 == ring->el_size || NULL == ring || NULL == ring->base || 0 == ring->len) { mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n"); - return MHI_STATUS_ERROR; + return -EINVAL; } - if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) { - mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n"); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) { - mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n"); - return MHI_STATUS_ERROR; - } + r = get_element_index(ring, *rp, &d_rp); + if (r) + return r; + r = get_element_index(ring, *wp, &d_wp); + + if (r) + return r; + ring_size = ring->len / ring->el_size; if ((d_wp + 1) % ring_size == d_rp) { @@ -40,22 +41,22 @@ static enum MHI_STATUS add_element(struct mhi_ring *ring, void **rp, } else { mhi_log(MHI_MSG_INFO, "Ring 0x%lX is full\n", (uintptr_t)ring->base); - return MHI_STATUS_RING_FULL; + return -ENOSPC; } } if (NULL != assigned_addr) *assigned_addr = (char *)ring->wp; *wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size + (uintptr_t)ring->base); - return MHI_STATUS_SUCCESS; + return 0; } -inline enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, +inline int ctxt_add_element(struct mhi_ring *ring, void **assigned_addr) { return add_element(ring, &ring->rp, &ring->wp, assigned_addr); } -inline enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, +inline int ctxt_del_element(struct mhi_ring *ring, void **assigned_addr) { return delete_element(ring, &ring->rp, &ring->wp, assigned_addr); @@ -70,32 +71,29 @@ inline enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, * @wp ring write pointer * @assigned_addr location of the element just deleted */ -enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp, +int delete_element(struct mhi_ring *ring, void **rp, void **wp, void **assigned_addr) { uintptr_t d_wp = 0, d_rp = 0, ring_size = 0; + int r; if (0 == ring->el_size || NULL == ring || - NULL == ring->base || 0 == ring->len) { - mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n"); - return MHI_STATUS_ERROR; - } - ring_size = ring->len / ring->el_size; + NULL == ring->base || 0 == ring->len) + return -EINVAL; - if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) { - mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n"); - return MHI_STATUS_ERROR; - } - if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) { - 
mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n"); - return MHI_STATUS_ERROR; - } + ring_size = ring->len / ring->el_size; + r = get_element_index(ring, *rp, &d_rp); + if (r) + return r; + r = get_element_index(ring, *wp, &d_wp); + if (r) + return r; if (d_wp == d_rp) { - mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lX is empty\n", + mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lx is empty\n", (uintptr_t)ring->base); if (NULL != assigned_addr) *assigned_addr = NULL; - return MHI_STATUS_RING_EMPTY; + return -ENODATA; } if (NULL != assigned_addr) @@ -103,14 +101,14 @@ enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp, *rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size + (uintptr_t)ring->base); - - return MHI_STATUS_SUCCESS; + return 0; } int mhi_get_free_desc(struct mhi_client_handle *client_handle) { u32 chan; struct mhi_device_ctxt *ctxt; + if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic || !client_handle->mhi_dev_ctxt) return -EINVAL; @@ -126,6 +124,7 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring) u32 nr_el = 0; uintptr_t ring_size = 0; enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + ring_size = ring->len / ring->el_size; ret_val = get_nr_enclosed_el(ring, ring->rp, ring->wp, &nr_el); if (ret_val != MHI_STATUS_SUCCESS) { @@ -142,6 +141,7 @@ enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *rp, uintptr_t index_rp = 0; uintptr_t index_wp = 0; uintptr_t ring_size = 0; + if (0 == ring->el_size || NULL == ring || NULL == ring->base || 0 == ring->len) { mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n"); @@ -167,20 +167,22 @@ enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *rp, return MHI_STATUS_SUCCESS; } -enum MHI_STATUS get_element_index(struct mhi_ring *ring, +int get_element_index(struct mhi_ring *ring, void *address, uintptr_t *index) { - if (MHI_STATUS_SUCCESS != validate_ring_el_addr(ring, - (uintptr_t)address)) - return MHI_STATUS_ERROR; + int r = validate_ring_el_addr(ring, (uintptr_t)address); + + if (r) + return r; *index = ((uintptr_t)address - (uintptr_t)ring->base) / ring->el_size; - return MHI_STATUS_SUCCESS; + return r; } enum MHI_STATUS get_element_addr(struct mhi_ring *ring, uintptr_t index, void **address) { uintptr_t ring_size = 0; + if (NULL == ring || NULL == address) return MHI_STATUS_ERROR; ring_size = ring->len / ring->el_size; diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c index 680ea6bd4fda..19ceee2f40ca 100644 --- a/drivers/platform/msm/mhi/mhi_ssr.c +++ b/drivers/platform/msm/mhi/mhi_ssr.c @@ -25,6 +25,7 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb, struct mhi_device_ctxt *mhi_dev_ctxt = &mhi_devices.device_list[0].mhi_ctxt; struct mhi_pcie_dev_info *mhi_pcie_dev = NULL; + mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices]; if (NULL != mhi_dev_ctxt) mhi_dev_ctxt->esoc_notif = action; @@ -81,6 +82,7 @@ static struct notifier_block mhi_ssr_nb = { static void esoc_parse_link_type(struct mhi_device_ctxt *mhi_dev_ctxt) { int ret_val; + ret_val = strcmp(mhi_dev_ctxt->esoc_handle->link, "HSIC+PCIe"); mhi_log(MHI_MSG_VERBOSE, "Link type is %s as indicated by ESOC\n", mhi_dev_ctxt->esoc_handle->link); @@ -97,6 +99,7 @@ int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt) struct pci_driver *mhi_driver; struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev; + mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI; mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver; np = dev->of_node; mhi_dev_ctxt->esoc_handle = 
devm_register_esoc_client(dev, "mdm");
@@ -164,6 +167,8 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
 	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
 	struct mhi_pcie_dev_info *mhi_pcie_dev = notify->data;
 	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+	int r = 0;
+
 	if (NULL == notify || NULL == notify->data) {
 		mhi_log(MHI_MSG_CRITICAL,
 			"Incomplete handle received\n");
@@ -180,7 +185,18 @@
 		if (0 == mhi_pcie_dev->link_up_cntr) {
 			mhi_log(MHI_MSG_INFO,
 				"Initializing MHI for the first time\n");
-			mhi_ctxt_init(mhi_pcie_dev);
+			r = mhi_ctxt_init(mhi_pcie_dev);
+			if (r) {
+				mhi_log(MHI_MSG_ERROR,
+					"MHI initialization failed, ret %d.\n",
+					r);
+				r = msm_pcie_deregister_event(
+					&mhi_pcie_dev->mhi_pci_link_event);
+				mhi_log(MHI_MSG_ERROR,
+					"Deregistered from PCIe notif r %d.\n",
+					r);
+				return;
+			}
 			mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
 			mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
 			pci_set_master(mhi_pcie_dev->pcie_device);
@@ -213,7 +229,6 @@
 		mhi_log(MHI_MSG_INFO,
 			"Received bad link event\n");
 		return;
-		break;
 	}
 }
 
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index d9bcc3008b02..bda1701fa1a1 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -107,16 +107,14 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 	u32 i;
 	u64 db_value = 0;
 	struct mhi_event_ctxt *event_ctxt = NULL;
-	struct mhi_control_seg *mhi_ctrl = NULL;
 	spinlock_t *lock = NULL;
 	unsigned long flags;
 
-	mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
 	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
 		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
 		mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
 		spin_lock_irqsave(lock, flags);
-		event_ctxt = &mhi_ctrl->mhi_ec_list[i];
+		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
 		db_value = mhi_v2p_addr(mhi_dev_ctxt,
 					MHI_RING_TYPE_EVENT_RING,
 					i,
@@ -137,6 +135,7 @@ static enum MHI_STATUS process_m0_transition(
 {
 	unsigned long flags;
 	int ret_val;
+
 	mhi_log(MHI_MSG_INFO, "Entered\n");
 
 	if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
@@ -196,6 +195,7 @@ static enum MHI_STATUS process_m1_transition(
 	unsigned long flags = 0;
 	int ret_val = 0;
 	int r = 0;
+
 	mhi_log(MHI_MSG_INFO,
 		"Processing M1 state transition from state %d\n",
 		mhi_dev_ctxt->mhi_state);
@@ -248,6 +248,7 @@ static enum MHI_STATUS process_m3_transition(
 		enum STATE_TRANSITION cur_work_item)
 {
 	unsigned long flags;
+
 	mhi_log(MHI_MSG_INFO, "Processing M3 state transition\n");
 	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@@ -264,6 +265,7 @@ static enum MHI_STATUS mhi_process_link_down(
 {
 	unsigned long flags;
 	int r;
+
 	mhi_log(MHI_MSG_INFO, "Entered.\n");
 	if (NULL == mhi_dev_ctxt)
 		return MHI_STATUS_ERROR;
@@ -366,6 +368,7 @@ static enum MHI_STATUS process_ready_transition(
 		enum STATE_TRANSITION cur_work_item)
 {
 	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+
 	mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
 	mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
@@ -404,7 +407,7 @@ static void mhi_reset_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
 	int chan)
 {
 	struct mhi_chan_ctxt *chan_ctxt =
-		&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
+		&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
 	struct mhi_ring *local_chan_ctxt =
 		&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
 	chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
@@ -454,9 +457,10 @@ static enum MHI_STATUS process_reset_transition(
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp = mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; - mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i]. + mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt[i]. mhi_cmd_ring_read_ptr = - mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING, + mhi_v2p_addr(mhi_dev_ctxt, + MHI_RING_TYPE_CMD_RING, i, (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp); } @@ -481,6 +485,7 @@ static enum MHI_STATUS process_syserr_transition( enum STATE_TRANSITION cur_work_item) { enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n"); if (MHI_STATUS_SUCCESS != ret_val) { mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n"); @@ -554,8 +559,8 @@ static enum MHI_STATUS process_sbl_transition( enum STATE_TRANSITION cur_work_item) { int r; - mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n"); + mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n"); pm_runtime_set_autosuspend_delay(&mhi_dev_ctxt->dev_info->plat_dev->dev, MHI_RPM_AUTOSUSPEND_TMR_VAL_MS); pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->plat_dev->dev); @@ -567,7 +572,6 @@ static enum MHI_STATUS process_sbl_transition( pm_runtime_enable(&mhi_dev_ctxt->dev_info->plat_dev->dev); mhi_log(MHI_MSG_INFO, "Enabled runtime pm\n"); mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL; - wmb(); enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env); return MHI_STATUS_SUCCESS; } @@ -774,7 +778,6 @@ enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt, new_state); *(enum STATE_TRANSITION *)stt_ring->wp = new_state; ret_val = ctxt_add_element(stt_ring, (void **)&cur_work_item); - wmb(); MHI_ASSERT(MHI_STATUS_SUCCESS == ret_val, "Failed to add selement to STT workqueue\n"); spin_unlock_irqrestore(work_q->q_lock, flags); @@ -810,12 +813,10 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt) mhi_dev_ctxt->counters.m0_event_timeouts++; r = -ETIME; goto exit; - break; case -ERESTARTSYS: mhi_log(MHI_MSG_CRITICAL, "Going Down...\n"); goto exit; - break; default: mhi_log(MHI_MSG_INFO, "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state); @@ -891,7 +892,6 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt) mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r); goto exit; - break; case MHI_STATE_M0: case MHI_STATE_M1: case MHI_STATE_M2: @@ -974,8 +974,6 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt) mhi_dev_ctxt->counters.m3_event_timeouts++; mhi_dev_ctxt->flags.pending_M3 = 0; goto exit; - break; - default: mhi_log(MHI_MSG_INFO, "M3 completion received\n"); diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c index 5abe56d555a9..e1b862675f25 100644 --- a/drivers/platform/msm/mhi/mhi_sys.c +++ b/drivers/platform/msm/mhi/mhi_sys.c @@ -46,7 +46,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, if (NULL == mhi_dev_ctxt) return -EIO; - cc_list = mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list; + cc_list = mhi_dev_ctxt->dev_space.ring_ctxt.cc_list; *offp = (u32)(*offp) % MHI_MAX_CHANNELS; while (!valid_chan) { @@ -74,7 +74,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, amnt_copied = scnprintf(mhi_dev_ctxt->chan_info, MHI_LOG_SIZE, - "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d\n", + "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n", "chan:", (unsigned int)*offp, "pkts from dev:", @@ -97,7 +97,9 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, 
char __user *buf, get_nr_avail_ring_elements( &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]), "/", - client_handle->chan_info.max_desc); + client_handle->chan_info.max_desc, + "bb_used:", + mhi_dev_ctxt->counters.bb_used[*offp]); *offp += 1; @@ -129,7 +131,7 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf, return -EIO; *offp = (u32)(*offp) % mhi_dev_ctxt->mmio_info.nr_event_rings; event_ring_index = *offp; - ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[event_ring_index]; + ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[event_ring_index]; if (*offp == (mhi_dev_ctxt->mmio_info.nr_event_rings - 1)) msleep(1000); @@ -196,6 +198,7 @@ static ssize_t mhi_dbgfs_trigger_msi(struct file *fp, const char __user *buf, { u32 msi_nr = 0; void *irq_ctxt = &((mhi_devices.device_list[0]).pcie_device->dev); + if (copy_from_user(&msi_nr, buf, sizeof(msi_nr))) return -ENOMEM; mhi_msi_handlr(msi_nr, irq_ctxt); @@ -258,57 +261,13 @@ static const struct file_operations mhi_dbgfs_state_fops = { .write = NULL, }; -inline void *mhi_get_virt_addr(struct mhi_meminfo *meminfo) -{ - return (void *)meminfo->va_aligned; -} - -inline u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo) -{ - return meminfo->size; -} - -enum MHI_STATUS mhi_mallocmemregion(struct mhi_device_ctxt *mhi_dev_ctxt, - struct mhi_meminfo *meminfo, size_t size) -{ - meminfo->va_unaligned = (uintptr_t)dma_alloc_coherent( - meminfo->dev, - size, - (dma_addr_t *)&(meminfo->pa_unaligned), - GFP_KERNEL); - if (!meminfo->va_unaligned) - return MHI_STATUS_ERROR; - meminfo->va_aligned = meminfo->va_unaligned; - meminfo->pa_aligned = meminfo->pa_unaligned; - meminfo->size = size; - if ((meminfo->pa_unaligned + size) >= MHI_DATA_SEG_WINDOW_END_ADDR) - return MHI_STATUS_ERROR; - - if (0 == meminfo->va_unaligned) - return MHI_STATUS_ERROR; - mb(); - return MHI_STATUS_SUCCESS; -} - -void mhi_freememregion(struct mhi_meminfo *meminfo) -{ - mb(); - dma_free_coherent(meminfo->dev, - meminfo->size, - (dma_addr_t *)&meminfo->pa_unaligned, - GFP_KERNEL); - meminfo->va_aligned = 0; - meminfo->pa_aligned = 0; - meminfo->va_unaligned = 0; - meminfo->pa_unaligned = 0; -} - int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt) { struct dentry *mhi_chan_stats; struct dentry *mhi_state_stats; struct dentry *mhi_msi_trigger; struct dentry *mhi_ev_stats; + mhi_dev_ctxt->mhi_parent_folder = debugfs_create_dir("mhi", NULL); if (mhi_dev_ctxt->mhi_parent_folder == NULL) { @@ -365,22 +324,22 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan, uintptr_t phy_ptr) { uintptr_t virtual_ptr; - struct mhi_control_seg *cs; - cs = mhi_dev_ctxt->mhi_ctrl_seg; + struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt; + switch (type) { case MHI_RING_TYPE_EVENT_RING: virtual_ptr = (uintptr_t)((phy_ptr - - (uintptr_t)cs->mhi_ec_list[chan].mhi_event_ring_base_addr) + (uintptr_t)cs->ec_list[chan].mhi_event_ring_base_addr) + mhi_dev_ctxt->mhi_local_event_ctxt[chan].base); break; case MHI_RING_TYPE_XFER_RING: virtual_ptr = (uintptr_t)((phy_ptr - - (uintptr_t)cs->mhi_cc_list[chan].mhi_trb_ring_base_addr) + (uintptr_t)cs->cc_list[chan].mhi_trb_ring_base_addr) + mhi_dev_ctxt->mhi_local_chan_ctxt[chan].base); break; case MHI_RING_TYPE_CMD_RING: virtual_ptr = (uintptr_t)((phy_ptr - - (uintptr_t)cs->mhi_cmd_ctxt_list[chan].mhi_cmd_ring_base_addr) + (uintptr_t)cs->cmd_ctxt[chan].mhi_cmd_ring_base_addr) + mhi_dev_ctxt->mhi_local_cmd_ctxt[chan].base); break; default: @@ -389,29 +348,28 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt 
*mhi_dev_ctxt, return virtual_ptr; } - dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE type, u32 chan, uintptr_t va_ptr) { dma_addr_t phy_ptr; - struct mhi_control_seg *cs; - cs = mhi_dev_ctxt->mhi_ctrl_seg; + struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt; + switch (type) { case MHI_RING_TYPE_EVENT_RING: phy_ptr = (dma_addr_t)((va_ptr - (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[chan].base) + - (uintptr_t)cs->mhi_ec_list[chan].mhi_event_ring_base_addr); + (uintptr_t)cs->ec_list[chan].mhi_event_ring_base_addr); break; case MHI_RING_TYPE_XFER_RING: phy_ptr = (dma_addr_t)((va_ptr - (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].base) + - ((uintptr_t)cs->mhi_cc_list[chan].mhi_trb_ring_base_addr)); + ((uintptr_t)cs->cc_list[chan].mhi_trb_ring_base_addr)); break; case MHI_RING_TYPE_CMD_RING: phy_ptr = (dma_addr_t)((va_ptr - (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[chan].base) + - ((uintptr_t)cs->mhi_cmd_ctxt_list[chan].mhi_cmd_ring_base_addr)); + ((uintptr_t)cs->cmd_ctxt[chan].mhi_cmd_ring_base_addr)); break; default: break; diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h index 37cc9a0b426c..8ba88b297a11 100644 --- a/drivers/platform/msm/mhi/mhi_sys.h +++ b/drivers/platform/msm/mhi/mhi_sys.h @@ -64,18 +64,12 @@ struct mhi_meminfo { uintptr_t size; }; -enum MHI_STATUS mhi_mallocmemregion(struct mhi_device_ctxt *mhi_dev_ctxt, - struct mhi_meminfo *meminfo, size_t size); - uintptr_t mhi_get_phy_addr(struct mhi_meminfo *meminfo); -void *mhi_get_virt_addr(struct mhi_meminfo *meminfo); uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE, u32 chan, uintptr_t phy_ptr); dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE type, u32 chan, uintptr_t va_ptr); -u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo); -void mhi_freememregion(struct mhi_meminfo *meminfo); void print_ring(struct mhi_ring *local_chan_ctxt, u32 ring_id); int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt);
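
The bounce-buffer machinery added to mhi.c follows a map-first, bounce-on-miss policy: create_bb() maps the client buffer with dma_map_single() and, only when the mapped address fails the VALID_BUF() window check, unmaps it and falls back to a dma_alloc_coherent() bounce buffer (outbound data is copied in immediately; inbound data is copied back in parse_inbound_bb()). The stand-alone sketch below models that decision in user space; fake_map(), WINDOW_START/WINDOW_END and struct buf_info are hypothetical stand-ins for the DMA API and the MHI address window, not driver interfaces.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WINDOW_START 0x1000u	/* models dev_space.start_win_addr */
#define WINDOW_END   0x9000u	/* models dev_space.end_win_addr   */

struct buf_info {
	uintptr_t dev_addr;	/* models bb_p_addr */
	void *bounce;		/* models bb_v_addr */
	void *client_buf;
	size_t len;
	int bb_active;
};

/* Hypothetical stand-in for dma_map_single(): pretend the CPU address
 * is also the device-visible address. */
static uintptr_t fake_map(void *buf)
{
	return (uintptr_t)buf;
}

static int valid_buf(uintptr_t addr, size_t len)
{
	return addr >= WINDOW_START && addr + len - 1 <= WINDOW_END;
}

static int create_bb_model(void *buf, size_t len, int to_device,
			   struct buf_info *bb)
{
	bb->client_buf = buf;
	bb->len = len;
	bb->bounce = NULL;
	bb->bb_active = 0;
	bb->dev_addr = fake_map(buf);
	if (valid_buf(bb->dev_addr, len))
		return 0;		/* direct mapping is usable */
	/* Outside the window: bounce (models dma_alloc_coherent) */
	bb->bounce = malloc(len);
	if (!bb->bounce)
		return -1;
	bb->dev_addr = WINDOW_START;	/* pretend the bounce lands in-window */
	if (to_device)			/* copy out now, as create_bb() does */
		memcpy(bb->bounce, buf, len);
	bb->bb_active = 1;
	return 0;
}

int main(void)
{
	char payload[16] = "hello";
	struct buf_info bb;

	assert(create_bb_model(payload, sizeof(payload), 1, &bb) == 0);
	printf("bounced: %d dev_addr: 0x%lx\n", bb.bb_active,
	       (unsigned long)bb.dev_addr);
	free(bb.bounce);		/* models free_bounce_buffer() */
	return 0;
}

Here free(NULL) is a harmless no-op when nothing was bounced; the driver's free_bounce_buffer() instead unmaps the direct mapping in that case.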
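
add_element() and delete_element() in mhi_ring_ops.c use the classic one-slot-open ring discipline: rp == wp means empty, and (wp + 1) % ring_size == rp means full, which is why the converted error returns are -ENOSPC and -ENODATA. A stand-alone sketch of just that index math, ignoring the driver's overwrite mode and locking:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

#define RING_SIZE 4	/* ring->len / ring->el_size in the driver */

struct ring { unsigned rp, wp; };

static int ring_add(struct ring *r)
{
	if ((r->wp + 1) % RING_SIZE == r->rp)
		return -ENOSPC;			/* "Ring is full" */
	r->wp = (r->wp + 1) % RING_SIZE;
	return 0;
}

static int ring_del(struct ring *r)
{
	if (r->wp == r->rp)
		return -ENODATA;		/* "Ring is empty" */
	r->rp = (r->rp + 1) % RING_SIZE;
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0 };
	int i, added = 0;

	for (i = 0; i < RING_SIZE; i++)
		if (!ring_add(&r))
			added++;
	assert(added == RING_SIZE - 1);		/* one slot always kept open */
	while (!ring_del(&r))
		;
	assert(r.rp == r.wp);			/* back to empty */
	printf("usable capacity: %d of %d slots\n", added, RING_SIZE);
	return 0;
}

The sacrificed slot is what lets the driver distinguish full from empty without a separate element counter.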
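
Several of the new BUG_ON() calls (in create_bb(), parse_inbound() and mhi_poll_inbound()) assert that the per-channel bounce ring and the transfer ring stay in lockstep: every queued or completed descriptor advances both rings, so their indices must always match. A small model of that invariant, under the same assumption the patch states (no concurrent movers):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8

struct ring { unsigned rp, wp; };

static void queue_both(struct ring *xfer, struct ring *bb)
{
	xfer->wp = (xfer->wp + 1) % RING_SIZE;
	bb->wp = (bb->wp + 1) % RING_SIZE;	/* ctxt_add_element(bb_ctxt) */
	assert(bb->wp == xfer->wp);	/* BUG_ON(bb_index != ctxt_index_wp) */
}

static void complete_both(struct ring *xfer, struct ring *bb)
{
	xfer->rp = (xfer->rp + 1) % RING_SIZE;
	bb->rp = (bb->rp + 1) % RING_SIZE;	/* ctxt_del_element(bb_ctxt) */
	assert(bb->rp == xfer->rp);	/* BUG_ON(bb_index != ctxt_index_rp) */
}

int main(void)
{
	struct ring xfer = { 0, 0 }, bb = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		queue_both(&xfer, &bb);
	for (i = 0; i < 5; i++)
		complete_both(&xfer, &bb);
	printf("rings in lockstep: rp %u wp %u\n", bb.rp, bb.wp);
	return 0;
}

If either ring ever advanced without the other, a completion would pair a descriptor with the wrong bounce buffer, which is the corruption the BUG_ON()s are guarding against.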
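
mhi_reg_write_field() is a plain read-modify-write: read the register, clear the field with the mask, OR in the shifted value, write it back. A user-space model with a fake register (the mask and shift values below are illustrative, not actual MHI register fields):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for the MMIO register */

static void reg_write_field(uint32_t mask, uint32_t shift, uint32_t val)
{
	uint32_t reg_val = fake_reg;	/* mhi_reg_read() */

	reg_val &= ~mask;		/* clear the field */
	reg_val |= val << shift;	/* insert the new value */
	fake_reg = reg_val;		/* mhi_reg_write() */
}

int main(void)
{
	fake_reg = 0xffffffffu;
	/* e.g. a 4-bit field occupying bits 7..4 */
	reg_write_field(0x000000f0u, 4, 0x5u);
	assert(fake_reg == 0xffffff5fu);
	printf("reg = 0x%08x\n", (unsigned)fake_reg);
	return 0;
}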
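
mhi_init_mmio() now programs every 64-bit address (CCABAP, ECABAP, CRCBAP, MHICTRLBASE/LIMIT, MHIDATABASE/LIMIT) as HIGHER/LOWER 32-bit register pairs built with HIGH_WORD()/LOW_WORD(). The sketch below round-trips one address through such a pair; the macro definitions here are assumptions matching their apparent use in the patch, since the driver's own definitions live in its headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HIGH_WORD(x) ((uint32_t)((x) >> 32))
#define LOW_WORD(x)  ((uint32_t)((x) & 0xffffffffu))

static uint32_t regs[2];	/* [0] = *_HIGHER, [1] = *_LOWER */

static void reg_write(int idx, uint32_t val)
{
	regs[idx] = val;
}

int main(void)
{
	uint64_t dma_cc_list = 0x00000001fff0a000ull;	/* example address */
	uint64_t readback;

	/* Two writes, as for the CCABAP_HIGHER/CCABAP_LOWER pair */
	reg_write(0, HIGH_WORD(dma_cc_list));
	reg_write(1, LOW_WORD(dma_cc_list));

	readback = ((uint64_t)regs[0] << 32) | regs[1];
	assert(readback == dma_cc_list);
	printf("high 0x%08x low 0x%08x\n", (unsigned)regs[0],
	       (unsigned)regs[1]);
	return 0;
}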