mhi: core: Enable BB scheme for MHI

Enable the bounce buffering (BB) scheme in MHI for cases
where allocated APQ memory falls outside of the memory range
that the device can access.

Change-Id: I9f40b0dda2f49111b7deb22973e6399fada90094
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
Authored by Andrei Danaila on 2015-07-30 15:26:37 -07:00; committed by David Keitel
parent 78c5836a96
commit b47ed73f99
16 changed files with 1054 additions and 784 deletions
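
For context, the per-transfer decision this scheme adds can be sketched as follows. This is a minimal illustration, not code from the patch: mhi_queue_one_buf() is a hypothetical helper, and it assumes DMA-coherent allocations land inside the device-addressable window (the patch itself places that window in calculate_mhi_addressing_window() and init_mhi_dev_mem()). The struct mhi_buf_info fields are the ones this commit introduces in mhi.h.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

/*
 * Illustrative sketch only. 'bb' is the patch's struct mhi_buf_info;
 * win_start/win_end mirror dev_space.start_win_addr/end_win_addr.
 */
static int mhi_queue_one_buf(struct device *dev, struct mhi_buf_info *bb,
			     dma_addr_t win_start, dma_addr_t win_end,
			     void *buf, size_t len)
{
	bb->client_buf = buf;
	bb->buf_len = len;
	bb->dir = DMA_TO_DEVICE;

	/* Map the client buffer and see where it lands in bus space */
	bb->bb_p_addr = dma_map_single(dev, buf, len, bb->dir);
	if (dma_mapping_error(dev, bb->bb_p_addr))
		return -EIO;

	if (bb->bb_p_addr >= win_start && bb->bb_p_addr + len < win_end) {
		/* Device can reach the buffer directly; no bounce needed */
		bb->bb_active = 0;
		return 0;
	}

	/* Out of window: stage the payload through a bounce buffer,
	 * assumed here to be allocated inside the window. */
	dma_unmap_single(dev, bb->bb_p_addr, len, bb->dir);
	bb->bb_v_addr = dma_alloc_coherent(dev, len, &bb->bb_p_addr,
					   GFP_KERNEL);
	if (!bb->bb_v_addr)
		return -ENOMEM;
	memcpy(bb->bb_v_addr, buf, len);	/* outbound (TX) copy */
	bb->filled_size = len;
	bb->bb_active = 1;
	return 0;
}

The transfer ring element would then carry bb->bb_p_addr rather than the client address; on completion, an RX transfer would copy bb->filled_size bytes back to bb->client_buf, and the bounce buffer would be unmapped or freed.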

@@ -11,7 +11,6 @@ Required properties:
below properties:
- esoc-names
- esoc-0
- wakeup-gpios: gpio used to wake device from low power mode.
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
below optional properties:
- qcom,msm-bus,name
@@ -28,8 +27,6 @@ Example:
compatible = "qcom,mhi";
esoc-names = "mdm";
esoc-0 = <&mdm1>;
mhi-device-wake-gpio =
<&msmgpio 108 0>;
qcom,msm-bus,name = "mhi";
qcom,msm-bus,num-cases = <2>;
qcom,msm-bus,num-paths = <1>;

@@ -49,7 +49,6 @@ struct pcie_core_info {
void __iomem *bar0_end;
void __iomem *bar2_base;
void __iomem *bar2_end;
u32 device_wake_gpio;
u32 irq_base;
u32 max_nr_msis;
struct pci_saved_state *pcie_state;
@@ -252,7 +251,7 @@ enum MHI_EVENT_CCS {
MHI_EVENT_CC_OOB = 0x5,
MHI_EVENT_CC_DB_MODE = 0x6,
MHI_EVENT_CC_UNDEFINED_ERR = 0x10,
MHI_EVENT_CC_RING_EL_ERR = 0x11,
MHI_EVENT_CC_BAD_TRE = 0x11,
};
struct mhi_ring {
@@ -357,12 +356,14 @@ struct mhi_state_work_queue {
enum STATE_TRANSITION buf[MHI_WORK_Q_MAX_SIZE];
};
struct mhi_control_seg {
union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1];
struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS];
struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS];
struct mhi_event_ctxt *mhi_ec_list;
u32 padding;
struct mhi_buf_info {
dma_addr_t bb_p_addr;
void *bb_v_addr;
void *client_buf;
size_t buf_len;
size_t filled_size;
enum dma_data_direction dir;
int bb_active;
};
struct mhi_counters {
@@ -384,6 +385,7 @@ struct mhi_counters {
u32 *ev_counter;
atomic_t outbound_acks;
u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
u32 bb_used[MHI_MAX_CHANNELS];
};
struct mhi_flags {
@@ -423,19 +425,37 @@ struct dev_mmio_info {
u64 mmio_len;
u32 nr_event_rings;
dma_addr_t dma_ev_ctxt; /* Bus address of ECABAP*/
void *dma_ev_rings;
};
struct mhi_ring_ctxt {
struct mhi_event_ctxt *ec_list;
struct mhi_chan_ctxt *cc_list;
struct mhi_cmd_ctxt *cmd_ctxt;
dma_addr_t dma_ec_list;
dma_addr_t dma_cc_list;
dma_addr_t dma_cmd_ctxt;
};
struct mhi_dev_space {
void *dev_mem_start;
dma_addr_t dma_dev_mem_start;
size_t dev_mem_len;
struct mhi_ring_ctxt ring_ctxt;
dma_addr_t start_win_addr;
dma_addr_t end_win_addr;
};
struct mhi_device_ctxt {
enum MHI_STATE mhi_state;
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
struct mhi_pcie_dev_info *dev_info;
struct pcie_core_info *dev_props;
struct mhi_control_seg *mhi_ctrl_seg;
struct mhi_meminfo *mhi_ctrl_seg_info;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
struct mhi_ring *mhi_local_event_ctxt;
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
@@ -511,8 +531,14 @@ struct mhi_event_ring_cfg {
irqreturn_t (*mhi_handler_ptr)(int , void *);
};
struct mhi_data_buf {
dma_addr_t bounce_buffer;
dma_addr_t client_buffer;
u32 bounce_flag;
};
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id);
enum MHI_STATUS mhi_reset_all_thread_queues(
int mhi_reset_all_thread_queues(
struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_add_elements_to_event_rings(
struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -521,20 +547,18 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring);
enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1,
void *loc_2, u32 *nr_el);
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 nr_ev_el, u32 event_ring_index);
/*Mhi Initialization functions */
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_INIT_ERROR_STAGE cleanup_stage);
enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *dest_device,
enum MHI_COMMAND which_cmd, u32 chan);
enum MHI_STATUS mhi_queue_tx_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CLIENT_CHANNEL chan,
void *payload,
size_t payload_size);
enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
uintptr_t trb_list_phy,
uintptr_t trb_list_virt,
u64 el_per_ring,
@@ -545,11 +569,11 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan);
enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
int delete_element(struct mhi_ring *ring, void **rp,
void **wp, void **assigned_addr);
enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, void **assigned_addr);
enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, void **assigned_addr);
enum MHI_STATUS get_element_index(struct mhi_ring *ring, void *address,
int ctxt_add_element(struct mhi_ring *ring, void **assigned_addr);
int ctxt_del_element(struct mhi_ring *ring, void **assigned_addr);
int get_element_index(struct mhi_ring *ring, void *address,
uintptr_t *index);
enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *ring, enum MHI_RING_TYPE ring_type, u32 ring_index);
@@ -565,8 +589,8 @@ enum MHI_STATUS mhi_test_for_device_ready(
struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_test_for_device_reset(
struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
int mhi_state_change_thread(void *ctxt);
enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
@@ -575,7 +599,6 @@ enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer);
int mhi_pci_suspend(struct pci_dev *dev, pm_message_t state);
int mhi_pci_resume(struct pci_dev *dev);
int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev);
int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev);
int mhi_init_pm_sysfs(struct device *dev);
void mhi_rem_pm_sysfs(struct device *dev);
void mhi_pci_remove(struct pci_dev *mhi_device);
@@ -589,7 +612,7 @@ void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -621,7 +644,7 @@ int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif

@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include "mhi_sys.h"
#include "mhi.h"
@@ -73,10 +74,12 @@ static ssize_t bhi_write(struct file *file,
goto bhi_copy_error;
}
amount_copied = count;
/* Flush the writes, in anticipation for a device read */
wmb();
mhi_log(MHI_MSG_INFO,
"Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
bhi_ctxt->phy_image_loc = dma_map_single(NULL,
bhi_ctxt->phy_image_loc = dma_map_single(
&mhi_dev_ctxt->dev_info->plat_dev->dev,
bhi_ctxt->image_loc,
bhi_ctxt->image_size,
DMA_TO_DEVICE);
@@ -131,7 +134,8 @@ static ssize_t bhi_write(struct file *file,
break;
usleep_range(20000, 25000);
}
dma_unmap_single(NULL, bhi_ctxt->phy_image_loc,
dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
bhi_ctxt->phy_image_loc,
bhi_ctxt->image_size, DMA_TO_DEVICE);
kfree(bhi_ctxt->unaligned_image_loc);
@@ -168,7 +172,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
bhi_ctxt->bhi_base = mhi_pcie_device->core.bar0_base;
pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHIOFF);
bhi_ctxt->bhi_base += pcie_word_val;
wmb();
mhi_log(MHI_MSG_INFO,
"Successfully registered char dev. bhi base is: 0x%p.\n",

@@ -73,30 +73,17 @@ dt_error:
return r;
}
int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt)
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0, i;
struct mhi_event_ctxt *ev_ctxt = NULL;
int r = 0;
size_t ctxt_size = sizeof(struct mhi_event_ctxt) *
mhi_dev_ctxt->mmio_info.nr_event_rings;
/* Allocate the event contexts in uncached memory */
mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list =
dma_alloc_coherent(
&mhi_dev_ctxt->dev_info->plat_dev->dev,
ctxt_size,
&mhi_dev_ctxt->mmio_info.dma_ev_ctxt,
GFP_KERNEL);
if (!mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list)
mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->mhi_local_event_ctxt)
return -ENOMEM;
mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->mhi_local_event_ctxt) {
r = -ENOMEM;
goto free_ec_list;
}
mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
@@ -111,73 +98,19 @@ int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt)
r = -ENOMEM;
goto free_ev_counter;
}
mhi_dev_ctxt->mmio_info.dma_ev_rings = kzalloc(sizeof(void *) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->mmio_info.dma_ev_rings) {
r = -ENOMEM;
goto free_msi_counter;
}
mhi_log(MHI_MSG_INFO, "Allocated ECABAP at Virt: 0x%p, Phys 0x%lx\n",
mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
(uintptr_t)mhi_dev_ctxt->mmio_info.dma_ev_ctxt);
/* Allocate event ring elements for each ring */
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
dma_addr_t ring_base_addr;
ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
mhi_dev_ctxt->mhi_local_event_ctxt[i].base =
dma_alloc_coherent(
&mhi_dev_ctxt->dev_info->plat_dev->dev,
sizeof(union mhi_event_pkt) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
&ring_base_addr,
GFP_KERNEL);
if (!mhi_dev_ctxt->mhi_local_event_ctxt[i].base) {
r = -ENOMEM;
goto free_event_ring;
}
ev_ctxt->mhi_event_ring_base_addr = ring_base_addr;
ev_ctxt->mhi_event_read_ptr = ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ring_base_addr;
mhi_dev_ctxt->mhi_local_event_ctxt[i].wp =
mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
mhi_dev_ctxt->mhi_local_event_ctxt[i].rp =
mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
mhi_log(MHI_MSG_INFO, "Allocated Event Ring %d\n", i);
}
return r;
free_event_ring:
for (; i > 0; --i) {
ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
sizeof(union mhi_event_pkt *) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
mhi_dev_ctxt->mhi_local_event_ctxt[i].base,
ev_ctxt->mhi_event_ring_base_addr);
}
kfree(mhi_dev_ctxt->mmio_info.dma_ev_rings);
free_msi_counter:
kfree(mhi_dev_ctxt->counters.msi_counter);
free_ev_counter:
kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
free_ec_list:
dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
ctxt_size,
mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
mhi_dev_ctxt->mmio_info.dma_ev_ctxt);
return r;
}
void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
struct mhi_ring *event_ctxt = NULL;
u64 db_value = 0;
event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
@@ -203,22 +136,20 @@ static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
return MHI_STATUS_SUCCESS;
}
int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int i;
struct mhi_ring *mhi_local_event_ctxt = NULL;
struct mhi_event_ctxt *event_ctxt;
struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
event_ctxt = &mhi_ctrl->mhi_ec_list[i];
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
mhi_dev_ctxt->ev_ring_props[i].intmod,
mhi_dev_ctxt->ev_ring_props[i].msi_vec);
}
return 0;
}
int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -291,7 +222,8 @@ enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
mhi_dev_ctxt->mmio_info.mmio_addr,
mhi_dev_ctxt->mmio_info.mmio_len);
mhi_log(MHI_MSG_INFO, "Initializing event ring %d\n", ring_index);
mhi_log(MHI_MSG_INFO, "Initializing event ring %d with %d desc\n",
ring_index, nr_ev_el);
for (i = 0; i < nr_ev_el - 1; ++i) {
ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
@@ -312,16 +244,20 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
{
struct mhi_event_ctxt *ev_ctxt;
struct mhi_ring *local_ev_ctxt;
mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index);
ev_ctxt =
&mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[index];
&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[index];
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
local_ev_ctxt->rp = local_ev_ctxt->base;
local_ev_ctxt->wp = local_ev_ctxt->base;
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
/* Flush writes to MMIO */
wmb();
}

@@ -20,6 +20,8 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#define CREATE_TRACE_POINTS
#include "mhi_trace.h"
@@ -64,8 +66,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
int ret_val = 0;
u32 i = 0, j = 0;
u32 retry_count = 0;
u32 msi_number = 32;
u32 requested_msi_number = 32, actual_msi_number = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct pci_dev *pcie_device = NULL;
@@ -74,15 +75,14 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
pcie_device = mhi_pcie_dev->pcie_device;
ret_val = mhi_init_pcie_device(mhi_pcie_dev);
if (0 != ret_val) {
if (ret_val) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to initialize pcie device, ret %d\n",
ret_val);
return -ENODEV;
}
ret_val = mhi_init_device_ctxt(mhi_pcie_dev,
&mhi_pcie_dev->mhi_ctxt);
if (MHI_STATUS_SUCCESS != ret_val) {
ret_val = mhi_init_device_ctxt(mhi_pcie_dev, &mhi_pcie_dev->mhi_ctxt);
if (ret_val) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to initialize main MHI ctxt ret %d\n",
ret_val);
@@ -112,12 +112,20 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
}
device_disable_async_suspend(&pcie_device->dev);
ret_val = pci_enable_msi_range(pcie_device, 0, msi_number);
if (0 != ret_val) {
ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
if (IS_ERR_VALUE(ret_val)) {
mhi_log(MHI_MSG_ERROR,
"Failed to enable MSIs for pcie dev ret_val %d.\n",
ret_val);
goto msi_config_err;
} else if (ret_val) {
mhi_log(MHI_MSG_INFO,
"Hrmmm, got fewer MSIs than we requested. Requested %d, got %d.\n",
requested_msi_number, ret_val);
actual_msi_number = ret_val;
} else {
mhi_log(MHI_MSG_VERBOSE,
"Got all requested MSIs, moving on\n");
}
mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
@@ -142,23 +150,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
mhi_pcie_dev->core.irq_base = pcie_device->irq;
mhi_log(MHI_MSG_VERBOSE,
"Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base);
mhi_pcie_dev->core.max_nr_msis = msi_number;
do {
ret_val = mhi_init_gpios(mhi_pcie_dev);
switch (ret_val) {
case -EPROBE_DEFER:
mhi_log(MHI_MSG_VERBOSE,
"DT requested probe defer, wait and retry\n");
break;
case 0:
break;
default:
mhi_log(MHI_MSG_CRITICAL,
"Could not get gpio from struct device tree!\n");
goto msi_config_err;
}
retry_count++;
} while ((retry_count < DT_WAIT_RETRIES) && (ret_val == -EPROBE_DEFER));
mhi_pcie_dev->core.max_nr_msis = requested_msi_number;
ret_val = mhi_init_pm_sysfs(&pcie_device->dev);
if (ret_val != 0) {
mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs.\n");
@@ -189,17 +181,26 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
return ret_val;
mhi_state_transition_error:
if (MHI_STATUS_SUCCESS != mhi_clean_init_stage(&mhi_pcie_dev->mhi_ctxt,
MHI_INIT_ERROR_STAGE_UNWIND_ALL))
mhi_log(MHI_MSG_ERROR, "Could not clean up context\n");
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
kfree(mhi_dev_ctxt->mhi_chan_mutex);
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
kfree(mhi_dev_ctxt->ev_ring_props);
mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
gpio_free(mhi_pcie_dev->core.device_wake_gpio);
for (; i >= 0; --i)
free_irq(pcie_device->irq + i, &pcie_device->dev);
debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder);
msi_config_err:
pci_disable_msi(pcie_device);
pci_disable_device(pcie_device);
return ret_val;
}
@@ -255,6 +256,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
static int mhi_plat_probe(struct platform_device *pdev)
{
u32 nr_dev = mhi_devices.nr_of_devices;
mhi_log(MHI_MSG_INFO, "Entered\n");
mhi_devices.device_list[nr_dev].plat_dev = pdev;
mhi_log(MHI_MSG_INFO, "Exited\n");

@@ -20,40 +20,11 @@
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_INIT_ERROR_STAGE cleanup_stage)
static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
switch (cleanup_stage) {
case MHI_INIT_ERROR_STAGE_UNWIND_ALL:
case MHI_INIT_ERROR_TIMERS:
case MHI_INIT_ERROR_STAGE_DEVICE_CTRL:
mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info);
case MHI_INIT_ERROR_STAGE_THREAD_QUEUES:
case MHI_INIT_ERROR_STAGE_THREADS:
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
case MHI_INIT_ERROR_STAGE_EVENTS:
kfree(mhi_dev_ctxt->mhi_ctrl_seg_info);
case MHI_INIT_ERROR_STAGE_MEM_ZONES:
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
kfree(mhi_dev_ctxt->mhi_chan_mutex);
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
case MHI_INIT_ERROR_STAGE_SYNC:
kfree(mhi_dev_ctxt->ev_ring_props);
break;
default:
ret_val = MHI_STATUS_ERROR;
break;
}
return ret_val;
}
static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 i = 0;
int i;
mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
@@ -85,7 +56,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
mutex_init(&mhi_dev_ctxt->mhi_link_state);
mutex_init(&mhi_dev_ctxt->pm_lock);
atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
return MHI_STATUS_SUCCESS;
return 0;
db_write_lock_free:
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
@@ -94,21 +65,327 @@ cmd_mutex_free:
chan_mutex_free:
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
ev_mutex_free:
return MHI_STATUS_ALLOC_ERROR;
return -ENOMEM;
}
static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info,
struct mhi_device_ctxt *mhi_dev_ctxt)
size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->mhi_ctrl_seg_info = kmalloc(sizeof(struct mhi_meminfo),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ctrl_seg_info)
return MHI_STATUS_ALLOC_ERROR;
mhi_dev_ctxt->mhi_ctrl_seg_info->dev = &dev_info->pcie_device->dev;
return MHI_STATUS_SUCCESS;
int i, r;
size_t mhi_dev_mem = 0;
struct mhi_chan_info chan_info;
/* Calculate size needed for contexts */
mhi_dev_mem += (MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt)) +
(NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt)) +
(mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt));
mhi_log(MHI_MSG_INFO, "Reserved %zd bytes for context info\n",
mhi_dev_mem);
/*Calculate size needed for cmd TREs */
mhi_dev_mem += (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
/* Calculate size needed for event TREs */
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
mhi_dev_mem += (sizeof(union mhi_event_pkt) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc);
/* Calculate size needed for xfer TREs and bounce buffers */
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
if (r)
continue;
/* Add size of TREs */
mhi_dev_mem += (sizeof(union mhi_xfer_pkt) *
chan_info.max_desc);
/* Add bounce buffer size */
if (mhi_dev_ctxt->flags.bb_enabled) {
mhi_log(MHI_MSG_INFO,
"Enabling BB list, chan %d\n", i);
/*mhi_dev_mem += (MAX_BOUNCE_BUF_SIZE *
chan_info.max_desc); */
}
}
mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n",
mhi_dev_mem);
return mhi_dev_mem;
}
static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
void init_dev_ev_ctxt(struct mhi_event_ctxt *ev_ctxt,
dma_addr_t p_base_addr, size_t len)
{
ev_ctxt->mhi_event_ring_base_addr = p_base_addr;
ev_ctxt->mhi_event_read_ptr = p_base_addr;
ev_ctxt->mhi_event_write_ptr = p_base_addr;
ev_ctxt->mhi_event_ring_len = len;
}
void init_local_ev_ctxt(struct mhi_ring *ev_ctxt,
void *v_base_addr, size_t len)
{
ev_ctxt->base = v_base_addr;
ev_ctxt->rp = v_base_addr;
ev_ctxt->wp = v_base_addr;
ev_ctxt->len = len;
ev_ctxt->el_size = sizeof(union mhi_event_pkt);
ev_ctxt->overwrite_en = 0;
}
void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
dma_addr_t p_base_addr, size_t len, int ev_index)
{
chan_ctxt->mhi_trb_ring_base_addr = p_base_addr;
chan_ctxt->mhi_trb_read_ptr = p_base_addr;
chan_ctxt->mhi_trb_write_ptr = p_base_addr;
chan_ctxt->mhi_trb_ring_len = len;
/* Prepopulate the channel ctxt */
chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
chan_ctxt->mhi_event_ring_index = ev_index;
}
void init_local_chan_ctxt(struct mhi_ring *chan_ctxt,
void *v_base_addr, size_t len)
{
chan_ctxt->base = v_base_addr;
chan_ctxt->rp = v_base_addr;
chan_ctxt->wp = v_base_addr;
chan_ctxt->len = len;
chan_ctxt->el_size = sizeof(union mhi_event_pkt);
chan_ctxt->overwrite_en = 0;
}
int populate_bb_list(struct list_head *bb_list, int num_bb)
{
struct mhi_buf_info *mhi_buf = NULL;
int i;
for (i = 0; i < num_bb; ++i) {
mhi_buf = kzalloc(sizeof(struct mhi_buf_info), GFP_KERNEL);
if (!mhi_buf)
return -ENOMEM;
mhi_buf->bb_p_addr = 0;
mhi_buf->bb_v_addr = NULL;
mhi_log(MHI_MSG_INFO,
"Allocated BB v_addr 0x%p, p_addr 0x%llx\n",
mhi_buf->bb_v_addr, (u64)mhi_buf->bb_p_addr);
}
return 0;
}
/**
* mhi_cmd_ring_init- Initialization of the command ring
*
* @cmd_ctxt: command ring context to initialize
* @trb_list_phy_addr: Pointer to the dma address of the tre ring
* @trb_list_virt_addr: Pointer to the virtual address of the tre ring
* @ring_size: Ring size
* @ring: Pointer to the shadow command context
*
* @Return MHI_STATUS
*/
static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
void *trb_list_virt_addr,
dma_addr_t trb_list_phy_addr,
size_t ring_size, struct mhi_ring *ring)
{
cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_len = ring_size;
ring[PRIMARY_CMD_RING].wp = trb_list_virt_addr;
ring[PRIMARY_CMD_RING].rp = trb_list_virt_addr;
ring[PRIMARY_CMD_RING].base = trb_list_virt_addr;
ring[PRIMARY_CMD_RING].len = ring_size;
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
return 0;
}
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
bb_ctxt->len = bb_ctxt->el_size * nr_el;
bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
bb_ctxt->wp = bb_ctxt->base;
bb_ctxt->rp = bb_ctxt->base;
bb_ctxt->ack_rp = bb_ctxt->base;
if (!bb_ctxt->base)
return -ENOMEM;
return 0;
}
static void calculate_mhi_addressing_window(
struct mhi_device_ctxt *mhi_dev_ctxt)
{
dma_addr_t dma_dev_mem_start;
dma_addr_t dma_seg_size = 0x1FF00000UL;
dma_addr_t dma_max_addr = (dma_addr_t)(-1);
dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
if (dma_dev_mem_start < dma_seg_size) {
mhi_dev_ctxt->dev_space.start_win_addr = 0;
mhi_dev_ctxt->dev_space.end_win_addr =
dma_dev_mem_start + dma_seg_size +
(dma_seg_size - dma_dev_mem_start);
} else if (dma_dev_mem_start >= dma_seg_size &&
dma_dev_mem_start <= (dma_max_addr - dma_seg_size)) {
mhi_dev_ctxt->dev_space.start_win_addr =
dma_dev_mem_start - dma_seg_size;
mhi_dev_ctxt->dev_space.end_win_addr =
dma_dev_mem_start + dma_seg_size;
} else if (dma_dev_mem_start > (dma_max_addr - dma_seg_size)) {
mhi_dev_ctxt->dev_space.start_win_addr =
dma_dev_mem_start - (dma_seg_size +
(dma_seg_size - (dma_max_addr -
dma_dev_mem_start)));
mhi_dev_ctxt->dev_space.end_win_addr = dma_max_addr;
}
mhi_log(MHI_MSG_INFO,
"MHI start address at 0x%llx, Window Start 0x%llx Window End 0x%llx\n",
(u64)dma_dev_mem_start,
(u64)mhi_dev_ctxt->dev_space.start_win_addr,
(u64)mhi_dev_ctxt->dev_space.end_win_addr);
}
int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
{
size_t mhi_mem_index = 0, ring_len;
void *dev_mem_start;
dma_addr_t dma_dev_mem_start;
int i, r;
mhi_dev_ctxt->dev_space.dev_mem_len =
calculate_mhi_space(mhi_dev_ctxt);
mhi_dev_ctxt->dev_space.dev_mem_start =
dma_alloc_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
&mhi_dev_ctxt->dev_space.dma_dev_mem_start,
GFP_KERNEL);
if (!mhi_dev_ctxt->dev_space.dev_mem_start) {
mhi_log(MHI_MSG_ERROR,
"Failed to allocate memory of size %zd bytes\n",
mhi_dev_ctxt->dev_space.dev_mem_len);
return -ENOMEM;
}
dev_mem_start = mhi_dev_ctxt->dev_space.dev_mem_start;
dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
memset(dev_mem_start, 0, mhi_dev_ctxt->dev_space.dev_mem_len);
calculate_mhi_addressing_window(mhi_dev_ctxt);
mhi_log(MHI_MSG_INFO, "Starting Seg address: virt 0x%p, dma 0x%llx\n",
dev_mem_start, (u64)dma_dev_mem_start);
mhi_log(MHI_MSG_INFO, "Initializing CCABAP at virt 0x%p, dma 0x%llx\n",
dev_mem_start + mhi_mem_index,
(u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cc_list = dev_mem_start;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list = dma_dev_mem_start;
mhi_mem_index += MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt);
mhi_log(MHI_MSG_INFO, "Initializing CRCBAP at virt 0x%p, dma 0x%llx\n",
dev_mem_start + mhi_mem_index,
(u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt =
dev_mem_start + mhi_mem_index;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_cmd_ctxt =
dma_dev_mem_start + mhi_mem_index;
mhi_mem_index += NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt);
mhi_log(MHI_MSG_INFO, "Initializing ECABAP at virt 0x%p, dma 0x%llx\n",
dev_mem_start + mhi_mem_index,
(u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list =
dev_mem_start + mhi_mem_index;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list =
dma_dev_mem_start + mhi_mem_index;
mhi_mem_index += mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt);
mhi_log(MHI_MSG_INFO,
"Initializing CMD context at virt 0x%p, dma 0x%llx\n",
dev_mem_start + mhi_mem_index,
(u64)dma_dev_mem_start + mhi_mem_index);
/* TODO: Initialize both the local and device cmd context */
ring_len = (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
mhi_cmd_ring_init(mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt,
dev_mem_start + mhi_mem_index,
dma_dev_mem_start + mhi_mem_index,
ring_len,
mhi_dev_ctxt->mhi_local_cmd_ctxt);
mhi_mem_index += ring_len;
/* Initialize both the local and device event contexts */
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
ring_len = sizeof(union mhi_event_pkt) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc;
init_dev_ev_ctxt(&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i],
dma_dev_mem_start + mhi_mem_index,
ring_len);
init_local_ev_ctxt(&mhi_dev_ctxt->mhi_local_event_ctxt[i],
dev_mem_start + mhi_mem_index,
ring_len);
mhi_log(MHI_MSG_INFO,
"Initializing EV_%d TRE list at virt 0x%p dma 0x%llx\n",
i, dev_mem_start + mhi_mem_index,
(u64)dma_dev_mem_start + mhi_mem_index);
mhi_mem_index += ring_len;
}
/* Initialize both the local and device xfer contexts */
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
struct mhi_chan_info chan_info;
r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
if (r)
continue;
mhi_log(MHI_MSG_INFO, "Initializing chan ctxt %d\n", i);
ring_len = (sizeof(union mhi_xfer_pkt) *
chan_info.max_desc);
init_dev_chan_ctxt(
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i],
dma_dev_mem_start + mhi_mem_index,
ring_len, chan_info.ev_ring);
/* TODO: May not need to do this. It would be best for
* the client to set it during chan open */
mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i].
mhi_chan_type = (i % 2) + 1;
init_local_chan_ctxt(
&mhi_dev_ctxt->mhi_local_chan_ctxt[i],
dev_mem_start + mhi_mem_index,
ring_len);
/* TODO: May not need to do this. It would be best for
* the client to set it during chan open */
mhi_dev_ctxt->mhi_local_chan_ctxt[i].dir = (i % 2) + 1;
/* Add size of TREs */
mhi_mem_index += ring_len;
if (mhi_dev_ctxt->flags.bb_enabled) {
r = enable_bb_ctxt(
&mhi_dev_ctxt->chan_bb_list[i],
chan_info.max_desc);
if (r)
goto error_during_bb_list;
}
}
return 0;
error_during_bb_list:
for (; i >= 0; --i)
kfree(mhi_dev_ctxt->chan_bb_list[i].base);
dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
return r;
}
static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc(
@@ -155,7 +432,7 @@ static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m3_event);
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
return MHI_STATUS_SUCCESS;
return 0;
error_bhi_event:
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
error_m0_event:
@@ -167,7 +444,7 @@ error_event_handle_alloc:
return MHI_STATUS_ERROR;
}
static enum MHI_STATUS mhi_init_state_change_thread_work_queue(
static int mhi_init_state_change_thread_work_queue(
struct mhi_state_work_queue *q)
{
bool lock_acquired = 0;
@@ -176,7 +453,7 @@ static enum MHI_STATUS mhi_init_state_change_thread_work_queue(
if (NULL == q->q_lock) {
q->q_lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
if (NULL == q->q_lock)
return MHI_STATUS_ALLOC_ERROR;
return -ENOMEM;
spin_lock_init(q->q_lock);
} else {
spin_lock_irqsave(q->q_lock, flags);
@@ -192,124 +469,15 @@ static enum MHI_STATUS mhi_init_state_change_thread_work_queue(
if (lock_acquired)
spin_unlock_irqrestore(q->q_lock, flags);
return MHI_STATUS_SUCCESS;
return 0;
}
static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
*mhi_dev_ctxt)
{
size_t ctrl_seg_size = 0;
size_t ctrl_seg_offset = 0;
int i = 0;
u32 align_len = sizeof(u64) * 2;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
if (NULL == mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info ||
NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->enable_lpm = 1;
mhi_dev_ctxt->flags.mhi_initialized = 0;
mhi_log(MHI_MSG_INFO, "Allocating control segment.\n");
ctrl_seg_size += sizeof(struct mhi_control_seg);
/* Calculate the size of the control segment needed */
ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
ret_val = mhi_mallocmemregion(mhi_dev_ctxt, mhi_dev_ctxt->mhi_ctrl_seg_info,
ctrl_seg_size);
if (MHI_STATUS_SUCCESS != ret_val)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->mhi_ctrl_seg =
mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info);
if (!mhi_dev_ctxt->mhi_ctrl_seg)
return MHI_STATUS_ALLOC_ERROR;
/* Set the channel contexts, event contexts and cmd context */
ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg +
sizeof(struct mhi_control_seg);
/* Set the channel direction and state */
ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
mhi_chan_type = (i % 2) + 1;
mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i].
mhi_chan_state =
MHI_CHAN_STATE_ENABLED;
}
return MHI_STATUS_SUCCESS;
}
/**
* mhi_cmd_ring_init- Initialization of the command ring
*
* @cmd_ctxt: command ring context to initialize
* @trb_list_phy_addr: Pointer to the physical address of the tre ring
* @trb_list_virt_addr: Pointer to the virtual address of the tre ring
* @el_per_ring: Number of elements in this command ring
* @ring: Pointer to the shadow command context
*
* @Return MHI_STATUS
*/
static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
uintptr_t trb_list_phy_addr,
uintptr_t trb_list_virt_addr,
size_t el_per_ring, struct mhi_ring *ring)
{
cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr;
cmd_ctxt->mhi_cmd_ring_len =
(size_t)el_per_ring*sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].wp = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].rp = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].base = (void *)trb_list_virt_addr;
ring[PRIMARY_CMD_RING].len =
(size_t)el_per_ring*sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt)
static void mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt)
{
wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source");
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
u64 phy_cmd_trb_addr;
struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
r = init_event_ctxt_array(mhi_dev_ctxt);
if (r)
return MHI_STATUS_ERROR;
/* Init Command Ring */
phy_cmd_trb_addr =
((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->cmd_trb_list[PRIMARY_CMD_RING] -
mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+
mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned;
mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING],
phy_cmd_trb_addr,
(uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING],
CMD_EL_PER_RING,
&mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
return MHI_STATUS_SUCCESS;
}
static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
static int mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread,
mhi_dev_ctxt,
@@ -321,7 +489,7 @@ static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
"mhi_st_thrd");
if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
return MHI_STATUS_ERROR;
return MHI_STATUS_SUCCESS;
return 0;
}
/**
@@ -335,14 +503,15 @@ static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
*
* @return MHI_STATUS
*/
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
if (NULL == dev_info || NULL == mhi_dev_ctxt)
return MHI_STATUS_ERROR;
mhi_log(MHI_MSG_VERBOSE, "mhi_init_device_ctxt>Init MHI dev ctxt\n");
return -EINVAL;
mhi_log(MHI_MSG_VERBOSE, "Entered\n");
mhi_dev_ctxt->dev_info = dev_info;
mhi_dev_ctxt->dev_props = &dev_info->core;
@@ -351,64 +520,72 @@ enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to get event ring properties ret %d\n", r);
mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC);
return MHI_STATUS_ERROR;
goto error_during_props;
}
if (MHI_STATUS_SUCCESS != mhi_init_sync(mhi_dev_ctxt)) {
r = mhi_init_sync(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n");
mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC);
return MHI_STATUS_ERROR;
goto error_during_sync;
}
r = create_local_ev_ctxt(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize local event ctxt ret %d\n", r);
goto error_during_local_ev_ctxt;
}
r = init_mhi_dev_mem(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize device memory ret %d\n", r);
goto error_during_dev_mem_init;
}
r = mhi_init_events(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize mhi events ret %d\n", r);
goto error_wq_init;
}
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize work queues ret %d\n", r);
goto error_during_thread_init;
}
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
mhi_dev_ctxt->enable_lpm = 1;
if (MHI_STATUS_SUCCESS != mhi_init_ctrl_zone(dev_info, mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize memory zones\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_MEM_ZONES);
return MHI_STATUS_ERROR;
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
mhi_log(MHI_MSG_ERROR, "Failed to spawn threads ret %d\n", r);
goto error_during_thread_spawn;
}
if (MHI_STATUS_SUCCESS != mhi_init_events(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi events\n");
mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_EVENTS);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_reset_all_thread_queues(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize work queues\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_device_ctrl(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize ctrl seg\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
return MHI_STATUS_ERROR;
}
create_ev_rings(mhi_dev_ctxt);
mhi_init_wakelock(mhi_dev_ctxt);
if (MHI_STATUS_SUCCESS != mhi_init_contexts(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed initializing contexts\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_spawn_threads(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to spawn threads\n");
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_timers(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed initializing timers\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != mhi_init_wakelock(mhi_dev_ctxt)) {
mhi_log(MHI_MSG_ERROR, "Failed to initialize wakelock\n");
mhi_clean_init_stage(mhi_dev_ctxt,
MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
return MHI_STATUS_ERROR;
}
return MHI_STATUS_SUCCESS;
return r;
error_during_thread_spawn:
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
error_during_thread_init:
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
error_wq_init:
dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
error_during_dev_mem_init:
error_during_local_ev_ctxt:
kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
kfree(mhi_dev_ctxt->mhi_chan_mutex);
kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
error_during_sync:
kfree(mhi_dev_ctxt->ev_ring_props);
error_during_props:
return r;
}
/**
@@ -424,7 +601,7 @@ enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
*
* @Return MHI_STATUS
*/
enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
uintptr_t trb_list_phy, uintptr_t trb_list_virt,
u64 el_per_ring, enum MHI_CHAN_TYPE chan_type,
u32 event_ring, struct mhi_ring *ring,
@@ -448,26 +625,25 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
ring->dir = chan_type;
/* Flush writes to MMIO */
wmb();
return MHI_STATUS_SUCCESS;
return 0;
}
enum MHI_STATUS mhi_reset_all_thread_queues(
int mhi_reset_all_thread_queues(
struct mhi_device_ctxt *mhi_dev_ctxt)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
int ret_val = 0;
mhi_init_state_change_thread_work_queue(
ret_val = mhi_init_state_change_thread_work_queue(
&mhi_dev_ctxt->state_change_work_item_list);
if (MHI_STATUS_SUCCESS != ret_val) {
if (ret_val)
mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n");
return ret_val;
}
return ret_val;
}
enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt)
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 ret_val;
if (NULL == mhi_dev_ctxt)
return MHI_STATUS_ERROR;
mhi_dev_ctxt->mhi_cpu_notifier.notifier_call = mhi_cpu_notifier_cb;
@@ -475,6 +651,5 @@ enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt)
if (ret_val)
return MHI_STATUS_ERROR;
else
return MHI_STATUS_SUCCESS;
return 0;
}

@@ -19,6 +19,7 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
{
struct device *mhi_device = dev_id;
struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
if (!mhi_dev_ctxt) {
mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
return IRQ_HANDLED;
@@ -28,9 +29,9 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
mhi_log(MHI_MSG_VERBOSE,
"Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
atomic_inc(&mhi_dev_ctxt->flags.events_pending);
wake_up_interruptible(
mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
atomic_inc(&mhi_dev_ctxt->flags.events_pending);
wake_up_interruptible(
mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
return IRQ_HANDLED;
}
@@ -54,7 +55,7 @@ irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
if (likely(NULL != client_handle)) {
client_handle->result.user_data =
client_handle->user_data;
if (likely(NULL != &client_info->mhi_client_cb)) {
if (likely(NULL != &client_info->mhi_client_cb)) {
cb_info.result = &client_handle->result;
cb_info.cb_reason = MHI_CB_XFER;
cb_info.chan = client_handle->chan_info.chan_nr;
@@ -82,15 +83,18 @@ static enum MHI_STATUS mhi_process_event_ring(
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
u32 event_code;
ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index];
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
ev_index,
ev_ctxt->mhi_event_read_ptr);
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
ev_index,
ev_ctxt->mhi_event_read_ptr);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
BUG_ON(validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp));
while ((local_rp != device_rp) && (event_quota > 0) &&
(device_rp != NULL) && (local_rp != NULL)) {
event_to_process = *local_rp;
@@ -141,6 +145,7 @@ static enum MHI_STATUS mhi_process_event_ring(
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum STATE_TRANSITION new_state;
new_state = MHI_READ_STATE(&event_to_process);
mhi_log(MHI_MSG_INFO,
"MHI STE received ring 0x%x\n",
@@ -151,6 +156,7 @@ static enum MHI_STATUS mhi_process_event_ring(
case MHI_PKT_TYPE_EE_EVENT:
{
enum STATE_TRANSITION new_state;
mhi_log(MHI_MSG_INFO,
"MHI EEE received ring 0x%x\n",
ev_index);
@@ -203,8 +209,7 @@ int parse_event_thread(void *ctxt)
u32 i = 0;
int ret_val = 0;
int ret_val_process_event = 0;
atomic_t *ev_pen_ptr;
ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
/* Go through all event rings */
for (;;) {
@@ -221,7 +226,6 @@ int parse_event_thread(void *ctxt)
switch (ret_val) {
case -ERESTARTSYS:
return 0;
break;
default:
if (mhi_dev_ctxt->flags.kill_threads) {
mhi_log(MHI_MSG_INFO,
@@ -262,7 +266,7 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
{
enum MHI_STATUS ret_val;
client_handle->result.payload_buf = 0;
client_handle->result.buf_addr = NULL;
client_handle->result.bytes_xferd = 0;
client_handle->result.transaction_status = 0;
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,

@@ -13,6 +13,7 @@
#define _H_MHI_MACROS
#define MHI_IPC_LOG_PAGES (100)
#define MAX_BOUNCE_BUF_SIZE 0x2000
#define MHI_LOG_SIZE 0x1000
#define MHI_LINK_STABILITY_WAIT_MS 100
#define MHI_DEVICE_WAKE_DBOUNCE_TIMEOUT_MS 10
@@ -51,7 +52,7 @@
#define MHI_M2_DEBOUNCE_TMR_MS 10
#define MHI_XFER_DB_INTERVAL 8
#define MHI_EV_DB_INTERVAL 32
#define MHI_EV_DB_INTERVAL 1
#define MHI_DEV_WAKE_DB 127
@@ -79,10 +80,11 @@
#define VALID_CHAN_NR(_CHAN_NR) (IS_HARDWARE_CHANNEL(_CHAN_NR) || \
IS_SOFTWARE_CHANNEL(_CHAN_NR))
#define VALID_BUF(_BUF_ADDR, _BUF_LEN) \
(((uintptr_t)(_BUF_ADDR) >= MHI_DATA_SEG_WINDOW_START_ADDR) && \
#define VALID_BUF(_BUF_ADDR, _BUF_LEN, _MHI_DEV_CTXT) \
(((uintptr_t)(_BUF_ADDR) >= \
mhi_dev_ctxt->dev_space.start_win_addr) && \
(((uintptr_t)(_BUF_ADDR) + (uintptr_t)(_BUF_LEN) < \
MHI_DATA_SEG_WINDOW_END_ADDR)))
mhi_dev_ctxt->dev_space.end_win_addr)))
#define MHI_HW_INTMOD_VAL_MS 2
/* Timeout Values */
@@ -108,7 +110,7 @@
}
#define MHI_TX_TRB_GET_LEN(_FIELD, _PKT) \
(((_PKT)->data_tx_pkt).buf_len & (((MHI_##_FIELD ## __MASK) << \
MHI_##_FIELD ## __SHIFT))); \
MHI_##_FIELD ## __SHIFT))) \
/* MHI Event Ring Elements 7.4.1*/
#define EV_TRB_CODE
@@ -216,7 +218,7 @@
(_CTXT)->mhi_intmodt &= (~((MHI_##_FIELD ## __MASK) << \
MHI_##_FIELD ## __SHIFT)); \
(_CTXT)->mhi_intmodt |= new_val; \
};
}
#define MHI_GET_EV_CTXT(_FIELD, _CTXT) \
(((_CTXT)->mhi_intmodt >> MHI_##_FIELD ## __SHIFT) & \

(File diff suppressed because it is too large.)

@@ -17,6 +17,7 @@ enum MHI_STATUS mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
@@ -46,6 +47,7 @@ enum MHI_STATUS mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
/* Read MMIO and poll for READY bit to be set */
@@ -111,7 +113,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
/* Enable the channels */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
struct mhi_chan_ctxt *chan_ctxt =
&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
if (VALID_CHAN_NR(i))
chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
else
@@ -144,9 +146,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings);
pcie_dword_val = ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list -
mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+
mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned;
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, CCABAP_HIGHER,
@@ -161,7 +161,7 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_word_val);
/* Write the Event Context Base Address Register High and Low parts */
pcie_dword_val = mhi_dev_ctxt->mmio_info.dma_ev_ctxt;
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, ECABAP_HIGHER,
@@ -174,12 +174,8 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
ECABAP_LOWER_ECABAP_LOWER_MASK,
ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val);
/* Write the Command Ring Control Register High and Low parts */
pcie_dword_val =
((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list -
mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+
mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned;
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cmd_ctxt;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr,
@@ -196,45 +192,14 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->mmio_info.cmd_db_addr =
mhi_dev_ctxt->mmio_info.mmio_addr + CRDB_LOWER;
/* Set the control segment in the MMIO */
pcie_dword_val = ((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg -
mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned)+
mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned;
/* Set the control and data segments device MMIO */
pcie_dword_val = mhi_dev_ctxt->dev_space.start_win_addr;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_HIGHER,
MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK,
MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT,
0);
pcie_word_val = LOW_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_LOWER,
MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
0);
pcie_dword_val = (((uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg -
mhi_dev_ctxt->mhi_ctrl_seg_info->va_aligned) +
mhi_dev_ctxt->mhi_ctrl_seg_info->pa_aligned) +
mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_HIGHER,
MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
0);
pcie_word_val = LOW_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_LOWER,
MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
MHI_DATA_SEG_WINDOW_END_ADDR);
/* Set the data segment in the MMIO */
pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR;
pcie_word_val = HIGH_WORD(pcie_dword_val);
pcie_word_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_HIGHER,
MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK,
@@ -242,28 +207,44 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_word_val);
pcie_word_val = LOW_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_LOWER,
MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
pcie_word_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_LOWER,
MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK,
MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT,
pcie_word_val);
pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR;
pcie_dword_val = mhi_dev_ctxt->dev_space.end_win_addr;
pcie_word_val = HIGH_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_HIGHER,
MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
pcie_word_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATALIMIT_HIGHER,
MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK,
MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT,
pcie_word_val);
pcie_word_val = LOW_WORD(pcie_dword_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_LOWER,
MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
pcie_word_val);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr,
MHIDATALIMIT_LOWER,
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
pcie_word_val);
mhi_log(MHI_MSG_INFO, "Done..\n");
return MHI_STATUS_SUCCESS;
}

@@ -64,6 +64,7 @@ int mhi_runtime_suspend(struct device *dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
mhi_log(MHI_MSG_INFO, "Runtime Suspend - Entered\n");
r = mhi_initiate_m3(mhi_dev_ctxt);
pm_runtime_mark_last_busy(dev);
@@ -75,6 +76,7 @@ int mhi_runtime_resume(struct device *dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
mhi_log(MHI_MSG_INFO, "Runtime Resume - Entered\n");
r = mhi_initiate_m0(mhi_dev_ctxt);
pm_runtime_mark_last_busy(dev);
@@ -86,6 +88,7 @@ int mhi_pci_resume(struct pci_dev *pcie_dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = pcie_dev->dev.platform_data;
r = mhi_initiate_m0(mhi_dev_ctxt);
if (r)
goto exit;
@@ -149,6 +152,7 @@ ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
struct mhi_device_ctxt *mhi_dev_ctxt =
&mhi_devices.device_list[0].mhi_ctxt;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
ret_val = mhi_trigger_reset(mhi_dev_ctxt);
if (ret_val != MHI_STATUS_SUCCESS)
@@ -182,6 +186,7 @@ enum MHI_STATUS mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
int r;
struct pci_dev *pcie_dev;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
mhi_log(MHI_MSG_INFO, "Entered...\n");
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
mutex_lock(&mhi_dev_ctxt->mhi_link_state);
@@ -223,6 +228,7 @@ enum MHI_STATUS mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
int r = 0;
struct pci_dev *pcie_dev;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
mutex_lock(&mhi_dev_ctxt->mhi_link_state);

@@ -13,25 +13,26 @@
#include "mhi_sys.h"
#include "mhi.h"
static enum MHI_STATUS add_element(struct mhi_ring *ring, void **rp,
static int add_element(struct mhi_ring *ring, void **rp,
void **wp, void **assigned_addr)
{
uintptr_t d_wp = 0, d_rp = 0, ring_size = 0;
int r;
if (0 == ring->el_size || NULL == ring
|| NULL == ring->base || 0 == ring->len) {
mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return MHI_STATUS_ERROR;
return -EINVAL;
}
if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) {
mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) {
mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
return MHI_STATUS_ERROR;
}
r = get_element_index(ring, *rp, &d_rp);
if (r)
return r;
r = get_element_index(ring, *wp, &d_wp);
if (r)
return r;
ring_size = ring->len / ring->el_size;
if ((d_wp + 1) % ring_size == d_rp) {
@@ -40,22 +41,22 @@ static enum MHI_STATUS add_element(struct mhi_ring *ring, void **rp,
} else {
mhi_log(MHI_MSG_INFO, "Ring 0x%lX is full\n",
(uintptr_t)ring->base);
return MHI_STATUS_RING_FULL;
return -ENOSPC;
}
}
if (NULL != assigned_addr)
*assigned_addr = (char *)ring->wp;
*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
return MHI_STATUS_SUCCESS;
return 0;
}
inline enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring,
inline int ctxt_add_element(struct mhi_ring *ring,
void **assigned_addr)
{
return add_element(ring, &ring->rp, &ring->wp, assigned_addr);
}
inline enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring,
inline int ctxt_del_element(struct mhi_ring *ring,
void **assigned_addr)
{
return delete_element(ring, &ring->rp, &ring->wp, assigned_addr);
@@ -70,32 +71,29 @@ inline enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring,
* @wp ring write pointer
* @assigned_addr location of the element just deleted
*/
enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
int delete_element(struct mhi_ring *ring, void **rp,
void **wp, void **assigned_addr)
{
uintptr_t d_wp = 0, d_rp = 0, ring_size = 0;
int r;
if (0 == ring->el_size || NULL == ring ||
NULL == ring->base || 0 == ring->len) {
mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return MHI_STATUS_ERROR;
}
ring_size = ring->len / ring->el_size;
NULL == ring->base || 0 == ring->len)
return -EINVAL;
if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) {
mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
return MHI_STATUS_ERROR;
}
if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) {
mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
return MHI_STATUS_ERROR;
}
ring_size = ring->len / ring->el_size;
r = get_element_index(ring, *rp, &d_rp);
if (r)
return r;
r = get_element_index(ring, *wp, &d_wp);
if (r)
return r;
if (d_wp == d_rp) {
mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lX is empty\n",
mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lx is empty\n",
(uintptr_t)ring->base);
if (NULL != assigned_addr)
*assigned_addr = NULL;
return MHI_STATUS_RING_EMPTY;
return -ENODATA;
}
if (NULL != assigned_addr)
@ -103,14 +101,14 @@ enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
return MHI_STATUS_SUCCESS;
return 0;
}
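With add_element and delete_element now returning errnos instead of enum MHI_STATUS, callers can follow the usual kernel pattern. A hedged usage sketch, assuming an initialized struct mhi_ring named ring and a payload pkt of el_size bytes:

	void *slot = NULL;
	int r;

	r = ctxt_add_element(&ring, &slot);
	if (r)		/* -EINVAL, or -ENOSPC when the ring is full */
		return r;
	memcpy(slot, &pkt, ring.el_size);	/* slot points at the old wp */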
int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
struct mhi_device_ctxt *ctxt;
if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
!client_handle->mhi_dev_ctxt)
return -EINVAL;
@ -126,6 +124,7 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring)
u32 nr_el = 0;
uintptr_t ring_size = 0;
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
ring_size = ring->len / ring->el_size;
ret_val = get_nr_enclosed_el(ring, ring->rp, ring->wp, &nr_el);
if (ret_val != MHI_STATUS_SUCCESS) {
@ -142,6 +141,7 @@ enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *rp,
uintptr_t index_rp = 0;
uintptr_t index_wp = 0;
uintptr_t ring_size = 0;
if (0 == ring->el_size || NULL == ring ||
NULL == ring->base || 0 == ring->len) {
mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
@ -167,20 +167,22 @@ enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *rp,
return MHI_STATUS_SUCCESS;
}
enum MHI_STATUS get_element_index(struct mhi_ring *ring,
int get_element_index(struct mhi_ring *ring,
void *address, uintptr_t *index)
{
if (MHI_STATUS_SUCCESS != validate_ring_el_addr(ring,
(uintptr_t)address))
return MHI_STATUS_ERROR;
int r = validate_ring_el_addr(ring, (uintptr_t)address);
if (r)
return r;
*index = ((uintptr_t)address - (uintptr_t)ring->base) / ring->el_size;
return MHI_STATUS_SUCCESS;
return r;
}
enum MHI_STATUS get_element_addr(struct mhi_ring *ring,
uintptr_t index, void **address)
{
uintptr_t ring_size = 0;
if (NULL == ring || NULL == address)
return MHI_STATUS_ERROR;
ring_size = ring->len / ring->el_size;
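get_element_index and get_element_addr are meant to be inverses over the ring's element grid; a sketch of that round trip for an in-ring pointer (hypothetical check, not in the driver, and note get_element_addr still reports enum MHI_STATUS here):

	uintptr_t idx = 0;
	void *addr = NULL;

	if (!get_element_index(&ring, ring.wp, &idx) &&
	    MHI_STATUS_SUCCESS == get_element_addr(&ring, idx, &addr))
		MHI_ASSERT(addr == ring.wp,
			   "index/address round trip broken\n");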


@ -25,6 +25,7 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
struct mhi_device_ctxt *mhi_dev_ctxt =
&mhi_devices.device_list[0].mhi_ctxt;
struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
if (NULL != mhi_dev_ctxt)
mhi_dev_ctxt->esoc_notif = action;
@ -81,6 +82,7 @@ static struct notifier_block mhi_ssr_nb = {
static void esoc_parse_link_type(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val;
ret_val = strcmp(mhi_dev_ctxt->esoc_handle->link, "HSIC+PCIe");
mhi_log(MHI_MSG_VERBOSE, "Link type is %s as indicated by ESOC\n",
mhi_dev_ctxt->esoc_handle->link);
@ -97,6 +99,7 @@ int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
struct pci_driver *mhi_driver;
struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev;
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver;
np = dev->of_node;
mhi_dev_ctxt->esoc_handle = devm_register_esoc_client(dev, "mdm");
@ -164,6 +167,8 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
struct mhi_pcie_dev_info *mhi_pcie_dev = notify->data;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
int r = 0;
if (NULL == notify || NULL == notify->data) {
mhi_log(MHI_MSG_CRITICAL,
"Incomplete handle received\n");
@ -180,7 +185,18 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
if (0 == mhi_pcie_dev->link_up_cntr) {
mhi_log(MHI_MSG_INFO,
"Initializing MHI for the first time\n");
mhi_ctxt_init(mhi_pcie_dev);
r = mhi_ctxt_init(mhi_pcie_dev);
if (r) {
mhi_log(MHI_MSG_ERROR,
"MHI initialization failed, ret %d.\n",
r);
r = msm_pcie_register_event(
&mhi_pcie_dev->mhi_pci_link_event);
mhi_log(MHI_MSG_ERROR,
"Deregistered from PCIe notif r %d.\n",
r);
return;
}
mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
pci_set_master(mhi_pcie_dev->pcie_device);
@ -213,7 +229,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
mhi_log(MHI_MSG_INFO,
"Received bad link event\n");
return;
break;
}
}


@ -107,16 +107,14 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i;
u64 db_value = 0;
struct mhi_event_ctxt *event_ctxt = NULL;
struct mhi_control_seg *mhi_ctrl = NULL;
spinlock_t *lock = NULL;
unsigned long flags;
mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
spin_lock_irqsave(lock, flags);
event_ctxt = &mhi_ctrl->mhi_ec_list[i];
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
db_value =
mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
i,
@ -137,6 +135,7 @@ static enum MHI_STATUS process_m0_transition(
{
unsigned long flags;
int ret_val;
mhi_log(MHI_MSG_INFO, "Entered\n");
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
@ -196,6 +195,7 @@ static enum MHI_STATUS process_m1_transition(
unsigned long flags = 0;
int ret_val = 0;
int r = 0;
mhi_log(MHI_MSG_INFO,
"Processing M1 state transition from state %d\n",
mhi_dev_ctxt->mhi_state);
@ -248,6 +248,7 @@ static enum MHI_STATUS process_m3_transition(
enum STATE_TRANSITION cur_work_item)
{
unsigned long flags;
mhi_log(MHI_MSG_INFO,
"Processing M3 state transition\n");
write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@ -264,6 +265,7 @@ static enum MHI_STATUS mhi_process_link_down(
{
unsigned long flags;
int r;
mhi_log(MHI_MSG_INFO, "Entered.\n");
if (NULL == mhi_dev_ctxt)
return MHI_STATUS_ERROR;
@ -366,6 +368,7 @@ static enum MHI_STATUS process_ready_transition(
enum STATE_TRANSITION cur_work_item)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
@ -404,7 +407,7 @@ static void mhi_reset_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan)
{
struct mhi_chan_ctxt *chan_ctxt =
&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
struct mhi_ring *local_chan_ctxt =
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
@ -454,9 +457,10 @@ static enum MHI_STATUS process_reset_transition(
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i].
mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt[i].
mhi_cmd_ring_read_ptr =
mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
mhi_v2p_addr(mhi_dev_ctxt,
MHI_RING_TYPE_CMD_RING,
i,
(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
}
@ -481,6 +485,7 @@ static enum MHI_STATUS process_syserr_transition(
enum STATE_TRANSITION cur_work_item)
{
enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
if (MHI_STATUS_SUCCESS != ret_val) {
mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
@ -554,8 +559,8 @@ static enum MHI_STATUS process_sbl_transition(
enum STATE_TRANSITION cur_work_item)
{
int r;
mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");
mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");
pm_runtime_set_autosuspend_delay(&mhi_dev_ctxt->dev_info->plat_dev->dev,
MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->plat_dev->dev);
@ -567,7 +572,6 @@ static enum MHI_STATUS process_sbl_transition(
pm_runtime_enable(&mhi_dev_ctxt->dev_info->plat_dev->dev);
mhi_log(MHI_MSG_INFO, "Enabled runtime pm\n");
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
wmb();
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
return MHI_STATUS_SUCCESS;
}
@ -774,7 +778,6 @@ enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
new_state);
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
ret_val = ctxt_add_element(stt_ring, (void **)&cur_work_item);
wmb();
MHI_ASSERT(MHI_STATUS_SUCCESS == ret_val,
"Failed to add selement to STT workqueue\n");
spin_unlock_irqrestore(work_q->q_lock, flags);
@ -810,12 +813,10 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->counters.m0_event_timeouts++;
r = -ETIME;
goto exit;
break;
case -ERESTARTSYS:
mhi_log(MHI_MSG_CRITICAL,
"Going Down...\n");
goto exit;
break;
default:
mhi_log(MHI_MSG_INFO,
"Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
@ -891,7 +892,6 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_INFO,
"Failed to set bus freq ret %d\n", r);
goto exit;
break;
case MHI_STATE_M0:
case MHI_STATE_M1:
case MHI_STATE_M2:
@ -974,8 +974,6 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->counters.m3_event_timeouts++;
mhi_dev_ctxt->flags.pending_M3 = 0;
goto exit;
break;
default:
mhi_log(MHI_MSG_INFO,
"M3 completion received\n");


@ -46,7 +46,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
if (NULL == mhi_dev_ctxt)
return -EIO;
cc_list = mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list;
cc_list = mhi_dev_ctxt->dev_space.ring_ctxt.cc_list;
*offp = (u32)(*offp) % MHI_MAX_CHANNELS;
while (!valid_chan) {
@ -74,7 +74,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
MHI_LOG_SIZE,
"%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d\n",
"%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
"chan:",
(unsigned int)*offp,
"pkts from dev:",
@ -97,7 +97,9 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
get_nr_avail_ring_elements(
&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
"/",
client_handle->chan_info.max_desc);
client_handle->chan_info.max_desc,
"bb_used:",
mhi_dev_ctxt->counters.bb_used[*offp]);
*offp += 1;
@ -129,7 +131,7 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
return -EIO;
*offp = (u32)(*offp) % mhi_dev_ctxt->mmio_info.nr_event_rings;
event_ring_index = *offp;
ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[event_ring_index];
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[event_ring_index];
if (*offp == (mhi_dev_ctxt->mmio_info.nr_event_rings - 1))
msleep(1000);
@ -196,6 +198,7 @@ static ssize_t mhi_dbgfs_trigger_msi(struct file *fp, const char __user *buf,
{
u32 msi_nr = 0;
void *irq_ctxt = &((mhi_devices.device_list[0]).pcie_device->dev);
if (copy_from_user(&msi_nr, buf, sizeof(msi_nr)))
return -ENOMEM;
mhi_msi_handlr(msi_nr, irq_ctxt);
@ -258,57 +261,13 @@ static const struct file_operations mhi_dbgfs_state_fops = {
.write = NULL,
};
inline void *mhi_get_virt_addr(struct mhi_meminfo *meminfo)
{
return (void *)meminfo->va_aligned;
}
inline u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo)
{
return meminfo->size;
}
enum MHI_STATUS mhi_mallocmemregion(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_meminfo *meminfo, size_t size)
{
meminfo->va_unaligned = (uintptr_t)dma_alloc_coherent(
meminfo->dev,
size,
(dma_addr_t *)&(meminfo->pa_unaligned),
GFP_KERNEL);
if (!meminfo->va_unaligned)
return MHI_STATUS_ERROR;
meminfo->va_aligned = meminfo->va_unaligned;
meminfo->pa_aligned = meminfo->pa_unaligned;
meminfo->size = size;
if ((meminfo->pa_unaligned + size) >= MHI_DATA_SEG_WINDOW_END_ADDR)
return MHI_STATUS_ERROR;
if (0 == meminfo->va_unaligned)
return MHI_STATUS_ERROR;
mb();
return MHI_STATUS_SUCCESS;
}
void mhi_freememregion(struct mhi_meminfo *meminfo)
{
mb();
dma_free_coherent(meminfo->dev,
meminfo->size,
(dma_addr_t *)&meminfo->pa_unaligned,
GFP_KERNEL);
meminfo->va_aligned = 0;
meminfo->pa_aligned = 0;
meminfo->va_unaligned = 0;
meminfo->pa_unaligned = 0;
}
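These helpers are removed because refusing any allocation that crossed MHI_DATA_SEG_WINDOW_END_ADDR made bring-up fail whenever the allocator returned memory above the device-visible window; the bounce scheme instead stages such transfers through a coherent buffer inside the window. A hedged sketch of staging an outbound buffer via struct mhi_buf_info (illustrative only; the driver's actual flow lives in the new bounce paths, and dev, client_buf and len are assumed):

	struct mhi_buf_info bb;

	bb.buf_len = len;
	bb.dir = DMA_TO_DEVICE;
	bb.client_buf = client_buf;
	bb.bb_v_addr = dma_alloc_coherent(dev, len, &bb.bb_p_addr,
					  GFP_KERNEL);
	if (!bb.bb_v_addr)
		return -ENOMEM;
	memcpy(bb.bb_v_addr, client_buf, len);	/* copy below the window */
	bb.bb_active = 1;	/* TRE is pointed at bb_p_addr, not the client */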
int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
struct dentry *mhi_chan_stats;
struct dentry *mhi_state_stats;
struct dentry *mhi_msi_trigger;
struct dentry *mhi_ev_stats;
mhi_dev_ctxt->mhi_parent_folder =
debugfs_create_dir("mhi", NULL);
if (mhi_dev_ctxt->mhi_parent_folder == NULL) {
@ -365,22 +324,22 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan, uintptr_t phy_ptr)
{
uintptr_t virtual_ptr;
struct mhi_control_seg *cs;
cs = mhi_dev_ctxt->mhi_ctrl_seg;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
case MHI_RING_TYPE_EVENT_RING:
virtual_ptr = (uintptr_t)((phy_ptr -
(uintptr_t)cs->mhi_ec_list[chan].mhi_event_ring_base_addr)
(uintptr_t)cs->ec_list[chan].mhi_event_ring_base_addr)
+ mhi_dev_ctxt->mhi_local_event_ctxt[chan].base);
break;
case MHI_RING_TYPE_XFER_RING:
virtual_ptr = (uintptr_t)((phy_ptr -
(uintptr_t)cs->mhi_cc_list[chan].mhi_trb_ring_base_addr)
(uintptr_t)cs->cc_list[chan].mhi_trb_ring_base_addr)
+ mhi_dev_ctxt->mhi_local_chan_ctxt[chan].base);
break;
case MHI_RING_TYPE_CMD_RING:
virtual_ptr = (uintptr_t)((phy_ptr -
(uintptr_t)cs->mhi_cmd_ctxt_list[chan].mhi_cmd_ring_base_addr)
(uintptr_t)cs->cmd_ctxt[chan].mhi_cmd_ring_base_addr)
+ mhi_dev_ctxt->mhi_local_cmd_ctxt[chan].base);
break;
default:
@ -389,29 +348,28 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
return virtual_ptr;
}
dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t va_ptr)
{
dma_addr_t phy_ptr;
struct mhi_control_seg *cs;
cs = mhi_dev_ctxt->mhi_ctrl_seg;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
case MHI_RING_TYPE_EVENT_RING:
phy_ptr = (dma_addr_t)((va_ptr -
(uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[chan].base) +
(uintptr_t)cs->mhi_ec_list[chan].mhi_event_ring_base_addr);
(uintptr_t)cs->ec_list[chan].mhi_event_ring_base_addr);
break;
case MHI_RING_TYPE_XFER_RING:
phy_ptr = (dma_addr_t)((va_ptr -
(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].base) +
((uintptr_t)cs->mhi_cc_list[chan].mhi_trb_ring_base_addr));
((uintptr_t)cs->cc_list[chan].mhi_trb_ring_base_addr));
break;
case MHI_RING_TYPE_CMD_RING:
phy_ptr = (dma_addr_t)((va_ptr -
(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[chan].base) +
((uintptr_t)cs->mhi_cmd_ctxt_list[chan].mhi_cmd_ring_base_addr));
((uintptr_t)cs->cmd_ctxt[chan].mhi_cmd_ring_base_addr));
break;
default:
break;
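mhi_v2p_addr is the inverse of mhi_p2v_addr above: both rebase a ring pointer by the same per-ring offset, one toward the base address recorded in the device-visible context, the other back toward the host-side copy. A sketch of the invariant for a transfer ring (hypothetical assertion, not in the driver):

	uintptr_t va = (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
	dma_addr_t pa = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING,
				     chan, va);

	MHI_ASSERT(va == mhi_p2v_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING,
				      chan, (uintptr_t)pa),
		   "v2p/p2v round trip broken\n");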


@ -64,18 +64,12 @@ struct mhi_meminfo {
uintptr_t size;
};
enum MHI_STATUS mhi_mallocmemregion(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_meminfo *meminfo, size_t size);
uintptr_t mhi_get_phy_addr(struct mhi_meminfo *meminfo);
void *mhi_get_virt_addr(struct mhi_meminfo *meminfo);
uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE,
u32 chan, uintptr_t phy_ptr);
dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t va_ptr);
u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo);
void mhi_freememregion(struct mhi_meminfo *meminfo);
void print_ring(struct mhi_ring *local_chan_ctxt, u32 ring_id);
int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt);