mhi: core: Clean-up MHI context data structure

Remove unused MHI data structures and reorganize the main
MHI context data structure for easier debugging.

Change-Id: I2658bef7fcda95181bf8d80f58991277cf854449
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
Author: Andrei Danaila <adanaila@codeaurora.org>
Date:   2015-04-15 17:27:52 -07:00 (committed by David Keitel)
Commit: 9cf5caae4e (parent e1732cf655)
11 changed files with 251 additions and 268 deletions
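
At a glance, the reorganization replaces a flat set of fields in struct
mhi_device_ctxt with small grouping structs. A minimal sketch of the new
layout, reconstructed from the hunks below (only the regrouped members are
shown; everything else in the context structure is unchanged):

struct dev_mmio_info {
        void __iomem *mmio_addr;        /* BAR0 base */
        void __iomem *chan_db_addr;     /* channel doorbell region */
        void __iomem *event_db_addr;    /* event ring doorbell region */
        void __iomem *cmd_db_addr;      /* command ring doorbell */
        u64 mmio_len;
};

struct mhi_wait_queues {
        wait_queue_head_t *mhi_event_wq;        /* was event_handle */
        wait_queue_head_t *state_change_event;
        wait_queue_head_t *m0_event;            /* was M0_event */
        wait_queue_head_t *m3_event;            /* was M3_event */
        wait_queue_head_t *bhi_event;
};

Per-ring counters (msi_counter, ev_counter, chan_pkts_xferd) fold into
struct mhi_counters, and mode bits (uldl_enabled, db_mode, m2_transition,
the thread-stopped flags) fold into struct mhi_flags, so call sites change
from mhi_dev_ctxt->mmio_addr to mhi_dev_ctxt->mmio_info.mmio_addr and from
mhi_dev_ctxt->M0_event to mhi_dev_ctxt->mhi_ev_wq.m0_event.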


@@ -200,13 +200,6 @@ struct __packed mhi_reset_chan_cmd_pkt {
         u32 info;
 };
 
-struct __packed mhi_stop_chan_cmd_pkt {
-        u32 reserved1;
-        u32 reserved2;
-        u32 reserved3;
-        u32 info;
-};
-
 struct __packed mhi_ee_state_change_event {
         u64 reserved1;
         u32 exec_env;
@@ -238,7 +231,6 @@ union __packed mhi_xfer_pkt {
 };
 
 union __packed mhi_cmd_pkt {
-        struct mhi_stop_chan_cmd_pkt stop_cmd_pkt;
         struct mhi_reset_chan_cmd_pkt reset_cmd_pkt;
         struct mhi_noop_cmd_pkt noop_cmd_pkt;
         struct mhi_noop_cmd_pkt type;
@@ -272,6 +264,7 @@ struct mhi_ring {
         uintptr_t len;
         uintptr_t el_size;
         u32 overwrite_en;
+        enum MHI_CHAN_TYPE dir;
 };
 
 enum MHI_CMD_STATUS {
@@ -355,19 +348,14 @@ struct mhi_state_work_queue {
 struct mhi_control_seg {
         union mhi_xfer_pkt *xfer_trb_list[MHI_MAX_CHANNELS];
-        union mhi_event_pkt *ev_trb_list[EVENT_RINGS_ALLOCATED];
+        union mhi_event_pkt *ev_trb_list[NR_EV_RINGS];
         union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1];
         struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS];
         struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS];
-        struct mhi_event_ctxt mhi_ec_list[EVENT_RINGS_ALLOCATED];
+        struct mhi_event_ctxt mhi_ec_list[NR_EV_RINGS];
         u32 padding;
 };
 
-struct mhi_chan_counters {
-        u32 pkts_xferd;
-        u32 ev_processed;
-};
-
 struct mhi_counters {
         u32 m0_m1;
         u32 m1_m0;
@@ -383,7 +371,10 @@ struct mhi_counters {
         u32 msi_disable_cntr;
         u32 msi_enable_cntr;
         u32 nr_irq_migrations;
+        u32 msi_counter[NR_EV_RINGS];
+        u32 ev_counter[NR_EV_RINGS];
         atomic_t outbound_acks;
+        u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
 };
 
 struct mhi_flags {
@@ -397,29 +388,45 @@ struct mhi_flags {
         atomic_t pending_resume;
         atomic_t pending_ssr;
         atomic_t pending_powerup;
+        atomic_t m2_transition;
         int stop_threads;
         atomic_t device_wake;
         u32 ssr;
+        u32 ev_thread_stopped;
+        u32 st_thread_stopped;
+        u32 uldl_enabled;
+        u32 db_mode[MHI_MAX_CHANNELS];
+};
+
+struct mhi_wait_queues {
+        wait_queue_head_t *mhi_event_wq;
+        wait_queue_head_t *state_change_event;
+        wait_queue_head_t *m0_event;
+        wait_queue_head_t *m3_event;
+        wait_queue_head_t *bhi_event;
+};
+
+struct dev_mmio_info {
+        void __iomem *mmio_addr;
+        void __iomem *chan_db_addr;
+        void __iomem *event_db_addr;
+        void __iomem *cmd_db_addr;
+        u64 mmio_len;
 };
 
 struct mhi_device_ctxt {
-        struct mhi_pcie_dev_info *dev_info;
-        struct pcie_core_info *dev_props;
-        void __iomem *mmio_addr;
-        void __iomem *channel_db_addr;
-        void __iomem *event_db_addr;
-        void __iomem *cmd_db_addr;
-        struct mhi_control_seg *mhi_ctrl_seg;
-        struct mhi_meminfo *mhi_ctrl_seg_info;
-        u64 nr_of_cc;
-        u64 nr_of_ec;
-        u64 nr_of_cmdc;
         enum MHI_STATE mhi_state;
         enum MHI_EXEC_ENV dev_exec_env;
-        u64 mmio_len;
+        struct mhi_pcie_dev_info *dev_info;
+        struct pcie_core_info *dev_props;
+        struct mhi_control_seg *mhi_ctrl_seg;
+        struct mhi_meminfo *mhi_ctrl_seg_info;
         struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
-        struct mhi_ring mhi_local_event_ctxt[MHI_MAX_CHANNELS];
+        struct mhi_ring mhi_local_event_ctxt[NR_EV_RINGS];
         struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
         struct mutex *mhi_chan_mutex;
         struct mutex mhi_link_state;
         spinlock_t *mhi_ev_spinlock_list;
@@ -427,48 +434,33 @@ struct mhi_device_ctxt {
         struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
         struct task_struct *event_thread_handle;
         struct task_struct *st_thread_handle;
-        u32 ev_thread_stopped;
-        u32 st_thread_stopped;
-        wait_queue_head_t *event_handle;
-        wait_queue_head_t *state_change_event_handle;
-        wait_queue_head_t *M0_event;
-        wait_queue_head_t *M3_event;
-        wait_queue_head_t *bhi_event;
-        wait_queue_head_t *chan_start_complete;
+        struct mhi_wait_queues mhi_ev_wq;
+        struct dev_mmio_info mmio_info;
         u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
         u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
         spinlock_t *db_write_lock;
-        struct platform_device *mhi_uci_dev;
-        struct platform_device *mhi_rmnet_dev;
-        atomic_t link_ops_flag;
         struct mhi_state_work_queue state_change_work_item_list;
         enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
         u32 cmd_ring_order;
-        u32 alloced_ev_rings[EVENT_RINGS_ALLOCATED];
-        u32 ev_ring_props[EVENT_RINGS_ALLOCATED];
-        u32 msi_counter[EVENT_RINGS_ALLOCATED];
-        u32 db_mode[MHI_MAX_CHANNELS];
-        u32 uldl_enabled;
-        u32 hw_intmod_rate;
-        u32 outbound_evmod_rate;
+        u32 alloced_ev_rings[NR_EV_RINGS];
+        u32 ev_ring_props[NR_EV_RINGS];
         struct mhi_counters counters;
         struct mhi_flags flags;
         u32 device_wake_asserted;
         rwlock_t xfer_lock;
-        atomic_t m2_transition;
         struct hrtimer m1_timer;
         ktime_t m1_timeout;
-        ktime_t ul_acc_tmr_timeout;
-        struct mhi_chan_counters mhi_chan_cntr[MHI_MAX_CHANNELS];
-        u32 ev_counter[MHI_MAX_CHANNELS];
-        u32 bus_client;
         struct esoc_desc *esoc_handle;
         void *esoc_ssr_handle;
+        u32 bus_client;
         struct msm_bus_scale_pdata *bus_scale_table;
         struct notifier_block mhi_cpu_notifier;
@@ -477,6 +469,7 @@ struct mhi_device_ctxt {
         atomic_t outbound_acks;
         struct mutex pm_lock;
         struct wakeup_source w_lock;
         int enable_lpm;
         char *chan_info;
         struct dentry *mhi_parent_folder;


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -47,7 +47,7 @@ static ssize_t bhi_write(struct file *file,
         if (count > BHI_MAX_IMAGE_SIZE)
                 return -ENOMEM;
-        wait_event_interruptible(*mhi_dev_ctxt->bhi_event,
+        wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
                         mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
         mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%x\n", count);


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -156,7 +156,8 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
         if (!mhi_init_debugfs(&mhi_pcie_dev->mhi_ctxt))
                 mhi_log(MHI_MSG_ERROR, "Failed to init debugfs.\n");
-        mhi_pcie_dev->mhi_ctxt.mmio_addr = mhi_pcie_dev->core.bar0_base;
+        mhi_pcie_dev->mhi_ctxt.mmio_info.mmio_addr =
+                                        mhi_pcie_dev->core.bar0_base;
         pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt;
         mhi_pcie_dev->mhi_ctxt.dev_info->plat_dev->dev.platform_data =
                                                 &mhi_pcie_dev->mhi_ctxt;


@@ -26,9 +26,6 @@ static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
         if (NULL == mhi_dev_ctxt)
                 return MHI_STATUS_ALLOC_ERROR;
         mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
-        mhi_dev_ctxt->nr_of_cc = MHI_MAX_CHANNELS;
-        mhi_dev_ctxt->nr_of_ec = EVENT_RINGS_ALLOCATED;
-        mhi_dev_ctxt->nr_of_cmdc = NR_OF_CMD_RINGS;
 
         mhi_dev_ctxt->alloced_ev_rings[PRIMARY_EVENT_RING] = 0;
         mhi_dev_ctxt->alloced_ev_rings[IPA_OUT_EV_RING] = IPA_OUT_EV_RING;
@@ -44,7 +41,7 @@ static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
                         mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
                         MHI_EVENT_POLLING_DISABLED);
 
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+        for (i = 0; i < NR_EV_RINGS; ++i) {
                 MHI_SET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
                                         mhi_dev_ctxt->ev_ring_props[i],
                                         i);
@@ -63,9 +60,9 @@ enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
                 mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info);
         case MHI_INIT_ERROR_STAGE_THREAD_QUEUES:
         case MHI_INIT_ERROR_STAGE_THREADS:
-                kfree(mhi_dev_ctxt->event_handle);
-                kfree(mhi_dev_ctxt->state_change_event_handle);
-                kfree(mhi_dev_ctxt->M0_event);
+                kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+                kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
+                kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
         case MHI_INIT_ERROR_STAGE_EVENTS:
                 kfree(mhi_dev_ctxt->mhi_ctrl_seg_info);
         case MHI_INIT_ERROR_STAGE_MEM_ZONES:
@@ -87,7 +84,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
         u32 i = 0;
 
         mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
-                                                        MHI_MAX_CHANNELS,
+                                                        NR_EV_RINGS,
                                                         GFP_KERNEL);
         if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
                 goto ev_mutex_free;
@@ -104,18 +101,18 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
                                         MHI_MAX_CHANNELS, GFP_KERNEL);
         if (NULL == mhi_dev_ctxt->db_write_lock)
                 goto db_write_lock_free;
-        for (i = 0; i < mhi_dev_ctxt->nr_of_cc; ++i)
-                mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
         for (i = 0; i < MHI_MAX_CHANNELS; ++i)
+                mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
+        for (i = 0; i < NR_EV_RINGS; ++i)
                 spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
-        for (i = 0; i < mhi_dev_ctxt->nr_of_cmdc; ++i)
+        for (i = 0; i < NR_OF_CMD_RINGS; ++i)
                 mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
         for (i = 0; i < MHI_MAX_CHANNELS; ++i)
                 spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
         rwlock_init(&mhi_dev_ctxt->xfer_lock);
         mutex_init(&mhi_dev_ctxt->mhi_link_state);
         mutex_init(&mhi_dev_ctxt->pm_lock);
-        atomic_set(&mhi_dev_ctxt->m2_transition, 0);
+        atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
         return MHI_STATUS_SUCCESS;
 
 db_write_lock_free:
@@ -142,65 +139,59 @@ static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info,
 static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-        mhi_dev_ctxt->event_handle = kmalloc(sizeof(wait_queue_head_t),
+        mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc(
+                                                sizeof(wait_queue_head_t),
                                                 GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->event_handle) {
+        if (NULL == mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq) {
                 mhi_log(MHI_MSG_ERROR, "Failed to init event");
                 return MHI_STATUS_ERROR;
         }
-        mhi_dev_ctxt->state_change_event_handle =
+        mhi_dev_ctxt->mhi_ev_wq.state_change_event =
                 kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->state_change_event_handle) {
+        if (NULL == mhi_dev_ctxt->mhi_ev_wq.state_change_event) {
                 mhi_log(MHI_MSG_ERROR, "Failed to init event");
                 goto error_event_handle_alloc;
         }
         /* Initialize the event which signals M0 */
-        mhi_dev_ctxt->M0_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->M0_event) {
+        mhi_dev_ctxt->mhi_ev_wq.m0_event = kmalloc(sizeof(wait_queue_head_t),
+                                                        GFP_KERNEL);
+        if (NULL == mhi_dev_ctxt->mhi_ev_wq.m0_event) {
                 mhi_log(MHI_MSG_ERROR, "Failed to init event");
                 goto error_state_change_event_handle;
         }
         /* Initialize the event which signals M0 */
-        mhi_dev_ctxt->M3_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->M3_event) {
+        mhi_dev_ctxt->mhi_ev_wq.m3_event = kmalloc(sizeof(wait_queue_head_t),
+                                                        GFP_KERNEL);
+        if (NULL == mhi_dev_ctxt->mhi_ev_wq.m3_event) {
                 mhi_log(MHI_MSG_ERROR, "Failed to init event");
-                goto error_M0_event;
+                goto error_m0_event;
         }
         /* Initialize the event which signals M0 */
-        mhi_dev_ctxt->bhi_event = kmalloc(sizeof(wait_queue_head_t),
+        mhi_dev_ctxt->mhi_ev_wq.bhi_event = kmalloc(sizeof(wait_queue_head_t),
                                                         GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->bhi_event) {
+        if (NULL == mhi_dev_ctxt->mhi_ev_wq.bhi_event) {
                 mhi_log(MHI_MSG_ERROR, "Failed to init event");
                 goto error_bhi_event;
         }
-        mhi_dev_ctxt->chan_start_complete =
-                kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
-        if (NULL == mhi_dev_ctxt->chan_start_complete) {
-                mhi_log(MHI_MSG_ERROR, "Failed to init event");
-                goto error_chan_complete;
-        }
         /* Initialize the event which starts the event parsing thread */
-        init_waitqueue_head(mhi_dev_ctxt->event_handle);
+        init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
         /* Initialize the event which starts the state change thread */
-        init_waitqueue_head(mhi_dev_ctxt->state_change_event_handle);
+        init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
         /* Initialize the event which triggers clients waiting to send */
-        init_waitqueue_head(mhi_dev_ctxt->M0_event);
+        init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m0_event);
         /* Initialize the event which triggers D3hot */
-        init_waitqueue_head(mhi_dev_ctxt->M3_event);
-        init_waitqueue_head(mhi_dev_ctxt->bhi_event);
-        init_waitqueue_head(mhi_dev_ctxt->chan_start_complete);
+        init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m3_event);
+        init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
 
         return MHI_STATUS_SUCCESS;
-error_chan_complete:
-        kfree(mhi_dev_ctxt->bhi_event);
 error_bhi_event:
-        kfree(mhi_dev_ctxt->M3_event);
-error_M0_event:
-        kfree(mhi_dev_ctxt->M0_event);
+        kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
+error_m0_event:
+        kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
 error_state_change_event_handle:
-        kfree(mhi_dev_ctxt->state_change_event_handle);
+        kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
 error_event_handle_alloc:
-        kfree(mhi_dev_ctxt->event_handle);
+        kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
         return MHI_STATUS_ERROR;
 }
@@ -261,7 +252,7 @@ static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
         }
         ctrl_seg_size += align_len - (ctrl_seg_size % align_len);
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i)
+        for (i = 0; i < NR_EV_RINGS; ++i)
                 ctrl_seg_size += sizeof(union mhi_event_pkt)*
                                 (EV_EL_PER_RING + ELEMENT_GAP);
@@ -297,7 +288,7 @@ static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
         }
         ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+        for (i = 0; i < NR_EV_RINGS; ++i) {
                 mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] =
                         (union mhi_event_pkt *)ctrl_seg_offset;
                 ctrl_seg_offset += sizeof(union mhi_event_pkt) *
@@ -391,7 +382,7 @@ static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
         u32 intmod_t = 0;
         uintptr_t ev_ring_addr;
 
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+        for (i = 0; i < NR_EV_RINGS; ++i) {
                 MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
                                         mhi_dev_ctxt->ev_ring_props[i],
                                         msi_vec);
@@ -448,7 +439,7 @@ static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
                                 (uintptr_t)trb_list,
                                 MAX_NR_TRBS_PER_HARD_CHAN,
                                 (i % 2) ? MHI_IN : MHI_OUT,
-                                EVENT_RINGS_ALLOCATED - (MHI_MAX_CHANNELS - i),
+                                NR_EV_RINGS - (MHI_MAX_CHANNELS - i),
                                 &mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
                 }
         }
@@ -591,8 +582,9 @@ enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
         spin_lock_irqsave(lock, flags);
 
-        mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
-                mhi_dev_ctxt->mmio_addr, mhi_dev_ctxt->mmio_len);
+        mhi_log(MHI_MSG_INFO, "mmio_info.mmio_addr = 0x%p, mmio_len = 0x%llx\n",
+                mhi_dev_ctxt->mmio_info.mmio_addr,
+                mhi_dev_ctxt->mmio_info.mmio_len);
         mhi_log(MHI_MSG_INFO,
                 "Initializing event ring %d\n", event_ring_index);


@@ -28,7 +28,8 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
                 mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
                 return IRQ_HANDLED;
         }
-        mhi_dev_ctxt->msi_counter[IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
+        mhi_dev_ctxt->counters.msi_counter[
+                        IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
         mhi_log(MHI_MSG_VERBOSE,
                 "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
         trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
@@ -36,7 +37,7 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
         case 0:
         case 1:
                 atomic_inc(&mhi_dev_ctxt->flags.events_pending);
-                wake_up_interruptible(mhi_dev_ctxt->event_handle);
+                wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
                 break;
         case 2:
                 client_index = MHI_CLIENT_IP_HW_0_IN;
@@ -182,13 +183,14 @@ int parse_event_thread(void *ctxt)
         /* Go through all event rings */
         for (;;) {
                 ret_val =
-                        wait_event_interruptible(*mhi_dev_ctxt->event_handle,
+                        wait_event_interruptible(
+                                *mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
                                 ((atomic_read(
                                 &mhi_dev_ctxt->flags.events_pending) > 0) &&
                                         !mhi_dev_ctxt->flags.stop_threads) ||
                                 mhi_dev_ctxt->flags.kill_threads ||
                                 (mhi_dev_ctxt->flags.stop_threads &&
-                                !mhi_dev_ctxt->ev_thread_stopped));
+                                !mhi_dev_ctxt->flags.ev_thread_stopped));
 
                 switch (ret_val) {
                 case -ERESTARTSYS:
@@ -201,15 +203,15 @@ int parse_event_thread(void *ctxt)
                         return 0;
                 }
                 if (mhi_dev_ctxt->flags.stop_threads) {
-                        mhi_dev_ctxt->ev_thread_stopped = 1;
+                        mhi_dev_ctxt->flags.ev_thread_stopped = 1;
                         continue;
                 }
                 break;
         }
-        mhi_dev_ctxt->ev_thread_stopped = 0;
+        mhi_dev_ctxt->flags.ev_thread_stopped = 0;
         atomic_dec(&mhi_dev_ctxt->flags.events_pending);
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+        for (i = 0; i < NR_EV_RINGS; ++i) {
                 MHI_GET_EVENT_RING_INFO(EVENT_RING_POLLING,
                                         mhi_dev_ctxt->ev_ring_props[i],
                                         ev_poll_en)


@@ -33,7 +33,7 @@
 #define MAX_NR_MSI 4
-#define EVENT_RINGS_ALLOCATED 3
+#define NR_EV_RINGS 3
 #define PRIMARY_EVENT_RING 0
 #define IPA_OUT_EV_RING 1
 #define IPA_IN_EV_RING 2


@@ -46,11 +46,11 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
                                 u64 val)
 {
         wmb();
-        if (mhi_dev_ctxt->channel_db_addr == io_addr) {
+        if (mhi_dev_ctxt->mmio_info.chan_db_addr == io_addr) {
                 mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].
                                 mhi_trb_write_ptr = val;
-        } else if (mhi_dev_ctxt->event_db_addr == io_addr) {
-                if (chan < EVENT_RINGS_ALLOCATED)
+        } else if (mhi_dev_ctxt->mmio_info.event_db_addr == io_addr) {
+                if (chan < NR_EV_RINGS)
                         mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[chan].
                                         mhi_event_write_ptr = val;
                 else
@@ -395,7 +395,7 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
                 &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
         db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                                 (uintptr_t)event_ctxt->wp);
-        mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr,
+        mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
                         event_ring_index, db_value);
 }
@@ -471,28 +471,22 @@ enum MHI_STATUS mhi_add_elements_to_event_rings(
 void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
                                         u32 chan)
 {
-        u64 db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
-                (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
+        struct mhi_ring *chan_ctxt;
+        u64 db_value;
+
+        chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+        db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
+                                        (uintptr_t)chan_ctxt->wp);
         mhi_dev_ctxt->mhi_chan_db_order[chan]++;
-        if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) {
-                if (unlikely(mhi_xfer_db_interval != 0)) {
-                        if ((mhi_dev_ctxt->
-                                mhi_chan_cntr[chan].pkts_xferd %
-                                mhi_xfer_db_interval) == 0)
-                                mhi_process_db(mhi_dev_ctxt,
-                                        mhi_dev_ctxt->channel_db_addr,
-                                        chan, db_value);
-                } else {
-                        if ((mhi_dev_ctxt->
-                                mhi_chan_cntr[chan].pkts_xferd %
-                                MHI_XFER_DB_INTERVAL) == 0)
-                                mhi_process_db(mhi_dev_ctxt,
-                                        mhi_dev_ctxt->channel_db_addr,
-                                        chan, db_value);
-                }
+        if (IS_HARDWARE_CHANNEL(chan) && chan_ctxt->dir == MHI_IN) {
+                if ((mhi_dev_ctxt->counters.chan_pkts_xferd[chan] %
+                                MHI_XFER_DB_INTERVAL) == 0)
+                        mhi_process_db(mhi_dev_ctxt,
+                                mhi_dev_ctxt->mmio_info.chan_db_addr,
+                                chan, db_value);
         } else {
                 mhi_process_db(mhi_dev_ctxt,
-                                mhi_dev_ctxt->channel_db_addr,
+                                mhi_dev_ctxt->mmio_info.chan_db_addr,
                                 chan, db_value);
         }
 }
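
The hunk above also drops the chan % 2 parity test: direction is now read
from the ring itself. The parity convention (odd channel = inbound) is still
applied once, when each ring's dir field is initialized with
(i % 2) ? MHI_IN : MHI_OUT, instead of being re-derived at every call site.
A hypothetical helper illustrating the new test (not part of the patch):

static inline bool mhi_chan_is_inbound(struct mhi_device_ctxt *ctxt, u32 chan)
{
        /* dir is the enum MHI_CHAN_TYPE member added to struct mhi_ring */
        return ctxt->mhi_local_chan_ctxt[chan].dir == MHI_IN;
}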
@@ -502,14 +496,14 @@ enum MHI_STATUS mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
         mhi_log(MHI_MSG_VERBOSE, "state = %d\n", mhi_dev_ctxt->mhi_state);
         if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
                 mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
-                        (atomic_read(&mhi_dev_ctxt->m2_transition)));
-                if ((atomic_read(&mhi_dev_ctxt->m2_transition)) == 0) {
+                        (atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
+                if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
                         if (mhi_dev_ctxt->flags.link_up) {
                                 mhi_assert_device_wake(mhi_dev_ctxt);
                                 ret_val = MHI_STATUS_CHAN_NOT_READY;
                         }
                 } else{
-                        mhi_log(MHI_MSG_INFO, "m2_transition flag is set\n");
+                        mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
                         ret_val = MHI_STATUS_CHAN_NOT_READY;
                 }
         } else {
@@ -531,12 +525,6 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
         chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
         mhi_dev_ctxt->counters.m1_m0++;
         mhi_log(MHI_MSG_VERBOSE, "Entered");
-        if (chan % 2 == 0) {
-                atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
-                mhi_log(MHI_MSG_VERBOSE,
-                        "Queued outbound pkt. Pending Acks %d\n",
-                        atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
-        }
         ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
         if (likely(((ret_val == MHI_STATUS_SUCCESS) &&
                 (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
@@ -559,8 +547,9 @@ static inline enum MHI_STATUS mhi_queue_tre(struct mhi_device_ctxt
                         mhi_dev_ctxt->mhi_ctrl_seg_info,
                         (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt->wp);
                 mhi_dev_ctxt->cmd_ring_order++;
-                mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
-                                0, db_value);
+                mhi_process_db(mhi_dev_ctxt,
+                                mhi_dev_ctxt->mmio_info.cmd_db_addr,
+                                0, db_value);
         } else {
                 mhi_log(MHI_MSG_VERBOSE,
                         "Wrong type of packet = %d\n", type);
@@ -912,7 +901,7 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
                 MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc, ieot_flag);
                 phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr;
                 trb_data_loc = (dma_addr_t)phy_buf_loc;
-                if (chan % 2)
+                if (local_chan_ctxt->dir == MHI_IN)
                         xfer_len = MHI_EV_READ_LEN(EV_LEN, event);
                 else
                         xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN,
@@ -930,14 +919,14 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
                         result->bytes_xferd = xfer_len;
                         result->user_data = client_handle->user_data;
                 }
-                if (chan % 2) {
+                if (local_chan_ctxt->dir == MHI_IN) {
                         parse_inbound(mhi_dev_ctxt, chan,
                                         local_ev_trb_loc, xfer_len);
                 } else {
                         parse_outbound(mhi_dev_ctxt, chan,
                                         local_ev_trb_loc, xfer_len);
                 }
-                mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++;
+                mhi_dev_ctxt->counters.chan_pkts_xferd[chan]++;
                 if (local_trb_loc ==
                         (union mhi_xfer_pkt *)local_chan_ctxt->rp) {
                         mhi_log(MHI_MSG_CRITICAL,
@@ -958,9 +947,9 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
         {
                 struct mhi_ring *chan_ctxt = NULL;
                 u64 db_value = 0;
-                mhi_dev_ctxt->uldl_enabled = 1;
+                mhi_dev_ctxt->flags.uldl_enabled = 1;
                 chan = MHI_EV_READ_CHID(EV_CHID, event);
-                mhi_dev_ctxt->db_mode[chan] = 1;
+                mhi_dev_ctxt->flags.db_mode[chan] = 1;
                 chan_ctxt =
                         &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
                 mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
@@ -968,7 +957,7 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
                         db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                                                 (uintptr_t)chan_ctxt->wp);
                         mhi_process_db(mhi_dev_ctxt,
-                                        mhi_dev_ctxt->channel_db_addr, chan,
+                                        mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
                                         db_value);
                 }
                 client_handle = mhi_dev_ctxt->client_handle_list[chan];
@@ -996,13 +985,8 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
         void *removed_element = NULL;
         void *added_element = NULL;
 
-        if (NULL == mhi_dev_ctxt || NULL == ring ||
-            ring_type > (MHI_RING_TYPE_MAX - 1) ||
-            ring_index > (MHI_MAX_CHANNELS - 1)) {
-                mhi_log(MHI_MSG_ERROR, "Bad input params\n");
-                return ret_val;
-        }
         ret_val = ctxt_del_element(ring, &removed_element);
+
         if (MHI_STATUS_SUCCESS != ret_val) {
                 mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
                 return MHI_STATUS_ERROR;
@@ -1024,17 +1008,22 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
         } else if (MHI_RING_TYPE_EVENT_RING == ring_type &&
                    mhi_dev_ctxt->counters.m0_m3 > 0 &&
                    IS_HARDWARE_CHANNEL(ring_index)) {
-                spinlock_t *lock = NULL;
-                unsigned long flags = 0;
+                spinlock_t *lock;
+                unsigned long flags;
+
+                if (ring_index >= NR_EV_RINGS)
+                        return MHI_STATUS_ERROR;
+
                 lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
                 spin_lock_irqsave(lock, flags);
                 db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                                         (uintptr_t)ring->wp);
                 mhi_update_ctxt(mhi_dev_ctxt,
-                                mhi_dev_ctxt->event_db_addr,
+                                mhi_dev_ctxt->mmio_info.event_db_addr,
                                 ring_index, db_value);
                 mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-                mhi_dev_ctxt->ev_counter[ring_index]++;
+                mhi_dev_ctxt->counters.ev_counter[ring_index]++;
                 spin_unlock_irqrestore(lock, flags);
         }
         atomic_inc(&mhi_dev_ctxt->flags.data_pending);
@@ -1051,8 +1040,9 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
                         mhi_cmd_mutex_list[PRIMARY_CMD_RING];
                 mutex_lock(cmd_mutex);
                 mhi_dev_ctxt->cmd_ring_order = 1;
-                mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
-                                ring_index, db_value);
+                mhi_process_db(mhi_dev_ctxt,
+                                mhi_dev_ctxt->mmio_info.cmd_db_addr,
+                                ring_index, db_value);
                 mutex_unlock(cmd_mutex);
                 break;
         }
@@ -1063,14 +1053,14 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
                 lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
                 spin_lock_irqsave(lock, flags);
                 mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
-                if ((mhi_dev_ctxt->ev_counter[ring_index] %
+                if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
                                 MHI_EV_DB_INTERVAL) == 0) {
                         db_value = mhi_v2p_addr(
                                         mhi_dev_ctxt->mhi_ctrl_seg_info,
                                         (uintptr_t)ring->wp);
                         mhi_process_db(mhi_dev_ctxt,
-                                        mhi_dev_ctxt->event_db_addr,
+                                        mhi_dev_ctxt->mmio_info.event_db_addr,
                                         ring_index, db_value);
                 }
                 spin_unlock_irqrestore(lock, flags);
                 break;
@@ -1083,7 +1073,7 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
                                         flags);
                 mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
                 mhi_process_db(mhi_dev_ctxt,
-                                mhi_dev_ctxt->channel_db_addr,
+                                mhi_dev_ctxt->mmio_info.chan_db_addr,
                                 ring_index, db_value);
                 spin_unlock_irqrestore(
                         &mhi_dev_ctxt->db_write_lock[ring_index],
@@ -1149,7 +1139,7 @@ static enum MHI_STATUS reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
          * they will never be acked after a channel reset.
          */
         ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
-        if (chan % 2 == 0)
+        if (ring->dir == MHI_OUT)
                 get_nr_enclosed_el(ring, ring->rp, ring->wp, &pending_el);
 
         mhi_log(MHI_MSG_INFO, "Decrementing chan %d out acks by %d.\n",
@@ -1235,8 +1225,6 @@ enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
                                 cmd_pkt))
                         mhi_log(MHI_MSG_INFO,
                                 "Failed to process reset cmd\n");
-                wake_up_interruptible(
-                        mhi_dev_ctxt->chan_start_complete);
                 break;
         default:
                 mhi_log(MHI_MSG_INFO,
@@ -1324,8 +1312,8 @@ enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr)
 enum MHI_STATUS mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
         u32 j = 0;
-        while (readl_relaxed((void *)(mhi_dev_ctxt->mmio_addr +
-                                        MHIREGLEN)) == 0xFFFFFFFF
+        while (mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHIREGLEN)
+                        == 0xFFFFFFFF
                         && j <= MHI_MAX_LINK_RETRIES) {
                 mhi_log(MHI_MSG_CRITICAL,
                         "Could not access MDM retry %d\n", j);
@@ -1361,13 +1349,13 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
 int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-        if ((mhi_dev_ctxt->channel_db_addr) &&
+        if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
             (mhi_dev_ctxt->flags.link_up)) {
                 mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
                                 mhi_dev_ctxt->enable_lpm);
                 atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
                 mhi_write_db(mhi_dev_ctxt,
-                             mhi_dev_ctxt->channel_db_addr,
+                             mhi_dev_ctxt->mmio_info.chan_db_addr,
                              MHI_DEV_WAKE_DB, 1);
                 mhi_dev_ctxt->device_wake_asserted = 1;
         } else {
@@ -1380,11 +1368,11 @@ inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
         if ((mhi_dev_ctxt->enable_lpm) &&
             (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
-            (mhi_dev_ctxt->channel_db_addr != NULL) &&
+            (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
             (mhi_dev_ctxt->flags.link_up)) {
                 mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
                 atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
-                mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
+                mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
                              MHI_DEV_WAKE_DB, 0);
                 mhi_dev_ctxt->device_wake_asserted = 0;
         } else {
@@ -1436,24 +1424,24 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
         mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
 
         /* Channel Doorbell and Polling Mode Disabled or Software Channel*/
-        if (io_addr == mhi_dev_ctxt->channel_db_addr) {
+        if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
                 if (!(IS_HARDWARE_CHANNEL(chan) &&
-                      mhi_dev_ctxt->uldl_enabled &&
-                      !mhi_dev_ctxt->db_mode[chan])) {
+                      mhi_dev_ctxt->flags.uldl_enabled &&
+                      !mhi_dev_ctxt->flags.db_mode[chan])) {
                         mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-                        mhi_dev_ctxt->db_mode[chan] = 0;
+                        mhi_dev_ctxt->flags.db_mode[chan] = 0;
                 }
         /* Event Doorbell and Polling mode Disabled */
-        } else if (io_addr == mhi_dev_ctxt->event_db_addr) {
+        } else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
                 /* Only ring for software channel */
                 if (IS_SOFTWARE_CHANNEL(chan) ||
-                    !mhi_dev_ctxt->uldl_enabled) {
+                    !mhi_dev_ctxt->flags.uldl_enabled) {
                         mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-                        mhi_dev_ctxt->db_mode[chan] = 0;
+                        mhi_dev_ctxt->flags.db_mode[chan] = 0;
                 }
         } else {
                 mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
-                mhi_dev_ctxt->db_mode[chan] = 0;
+                mhi_dev_ctxt->flags.db_mode[chan] = 0;
         }
 }


@@ -18,7 +18,8 @@ enum MHI_STATUS mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
         u32 pcie_word_val = 0;
         u32 expiry_counter;
         mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
-        pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHISTATUS);
+        pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
+                                     MHISTATUS);
         MHI_READ_FIELD(pcie_word_val,
                         MHICTRL_RESET_MASK,
                         MHICTRL_RESET_SHIFT);
@@ -29,7 +30,8 @@ enum MHI_STATUS mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
                 mhi_log(MHI_MSG_ERROR,
                         "Device is not RESET, sleeping and retrying.\n");
                 msleep(MHI_READY_STATUS_TIMEOUT_MS);
-                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHICTRL);
+                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
+                                             MHICTRL);
                 MHI_READ_FIELD(pcie_word_val,
                                 MHICTRL_RESET_MASK,
                                 MHICTRL_RESET_SHIFT);
@@ -47,7 +49,8 @@ enum MHI_STATUS mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
         mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
 
         /* Read MMIO and poll for READY bit to be set */
-        pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHISTATUS);
+        pcie_word_val = mhi_reg_read(
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
         MHI_READ_FIELD(pcie_word_val,
                         MHISTATUS_READY_MASK,
                         MHISTATUS_READY_SHIFT);
@@ -60,7 +63,7 @@ enum MHI_STATUS mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
                 mhi_log(MHI_MSG_ERROR,
                         "Device is not ready, sleeping and retrying.\n");
                 msleep(MHI_READY_STATUS_TIMEOUT_MS);
-                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_addr,
+                pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
                                              MHISTATUS);
                 MHI_READ_FIELD(pcie_word_val,
                                 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
@@ -79,22 +82,23 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
         enum MHI_STATUS ret_val;
         mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
-        mhi_dev_ctxt->mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;
+        mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;
 
         mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
-                        mhi_dev_ctxt->mmio_addr);
+                        mhi_dev_ctxt->mmio_info.mmio_addr);
 
-        mhi_dev_ctxt->mmio_len = mhi_reg_read(mhi_dev_ctxt->mmio_addr,
+        mhi_dev_ctxt->mmio_info.mmio_len = mhi_reg_read(
+                                        mhi_dev_ctxt->mmio_info.mmio_addr,
                                         MHIREGLEN);
 
-        if (0 == mhi_dev_ctxt->mmio_len) {
+        if (0 == mhi_dev_ctxt->mmio_info.mmio_len) {
                 mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
                 return MHI_STATUS_ERROR;
         }
 
         mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
         mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
-                                mhi_dev_ctxt->mmio_addr, MHIVER);
+                                mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
         if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
                 mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
                         mhi_dev_ctxt->dev_props->mhi_ver);
@@ -117,36 +121,40 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
                 "Read back MMIO Ready bit successfully. Moving on..\n");
         mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");
-        mhi_dev_ctxt->channel_db_addr = mhi_dev_ctxt->mmio_addr;
-        mhi_dev_ctxt->event_db_addr = mhi_dev_ctxt->mmio_addr;
+        mhi_dev_ctxt->mmio_info.chan_db_addr =
+                                mhi_dev_ctxt->mmio_info.mmio_addr;
+        mhi_dev_ctxt->mmio_info.event_db_addr =
+                                mhi_dev_ctxt->mmio_info.mmio_addr;
 
-        mhi_dev_ctxt->channel_db_addr += mhi_reg_read_field(
-                                        mhi_dev_ctxt->mmio_addr,
+        mhi_dev_ctxt->mmio_info.chan_db_addr += mhi_reg_read_field(
+                                        mhi_dev_ctxt->mmio_info.mmio_addr,
                                         CHDBOFF, CHDBOFF_CHDBOFF_MASK,
                                         CHDBOFF_CHDBOFF_SHIFT);
 
         mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
-        mhi_dev_ctxt->event_db_addr += mhi_reg_read_field(
-                                        mhi_dev_ctxt->mmio_addr,
+        mhi_dev_ctxt->mmio_info.event_db_addr += mhi_reg_read_field(
+                                        mhi_dev_ctxt->mmio_info.mmio_addr,
                                         ERDBOFF, ERDBOFF_ERDBOFF_MASK,
                                         ERDBOFF_ERDBOFF_SHIFT);
 
         mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");
-        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, MHICFG,
+        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+                                MHICFG,
                                 MHICFG_NER_MASK, MHICFG_NER_SHIFT,
-                                EVENT_RINGS_ALLOCATED);
+                                NR_EV_RINGS);
 
         pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                         (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list);
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, CCABAP_HIGHER,
                         CCABAP_HIGHER_CCABAP_HIGHER_MASK,
                         CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
-        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, CCABAP_LOWER,
+        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+                        CCABAP_LOWER,
                         CCABAP_LOWER_CCABAP_LOWER_MASK,
                         CCABAP_LOWER_CCABAP_LOWER_SHIFT,
                         pcie_word_val);
@@ -156,12 +164,13 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
                         (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list);
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, ECABAP_HIGHER,
                         ECABAP_HIGHER_ECABAP_HIGHER_MASK,
                         ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
-        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_addr, ECABAP_LOWER,
+        mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+                        ECABAP_LOWER,
                         ECABAP_LOWER_ECABAP_LOWER_MASK,
                         ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val);
@@ -171,31 +180,33 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
                 (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list);
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr,
+                        CRCBAP_HIGHER,
                         CRCBAP_HIGHER_CRCBAP_HIGHER_MASK,
                         CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT,
                         pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, CRCBAP_LOWER,
                         CRCBAP_LOWER_CRCBAP_LOWER_MASK,
                         CRCBAP_LOWER_CRCBAP_LOWER_SHIFT,
                         pcie_word_val);
 
-        mhi_dev_ctxt->cmd_db_addr = mhi_dev_ctxt->mmio_addr + CRDB_LOWER;
+        mhi_dev_ctxt->mmio_info.cmd_db_addr =
+                        mhi_dev_ctxt->mmio_info.mmio_addr + CRDB_LOWER;
         /* Set the control segment in the MMIO */
         pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                                 (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg);
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_HIGHER,
                         MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK,
                         MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT,
                         pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLBASE_LOWER,
                         MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
                         MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
                         pcie_word_val);
@@ -206,13 +217,13 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER,
+                mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_HIGHER,
                 MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
                 MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
                 pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER,
+                mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRLLIMIT_LOWER,
                 MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
                 MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
                 pcie_word_val);
@@ -221,14 +232,14 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
         pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR;
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_HIGHER,
                         MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK,
                         MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT,
                         pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATABASE_LOWER,
                         MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK,
                         MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT,
                         pcie_word_val);
@@ -237,13 +248,14 @@ enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
         pcie_word_val = HIGH_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr, MHIDATALIMIT_HIGHER,
                         MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK,
                         MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT,
                         pcie_word_val);
         pcie_word_val = LOW_WORD(pcie_dword_val);
         mhi_reg_write_field(mhi_dev_ctxt,
-                        mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER,
+                        mhi_dev_ctxt->mmio_info.mmio_addr,
+                        MHIDATALIMIT_LOWER,
                         MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
                         MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
                         pcie_word_val);


@@ -89,7 +89,7 @@ int mhi_pci_resume(struct pci_dev *pcie_dev)
         r = mhi_initiate_m0(mhi_dev_ctxt);
         if (r)
                 goto exit;
-        r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
+        r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
                         mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
                         mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
                         msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));


@@ -28,8 +28,9 @@ static void conditional_chan_db_write(
         if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
                 db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                         (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
-                mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
-                                chan, db_value);
+                mhi_process_db(mhi_dev_ctxt,
+                                mhi_dev_ctxt->mmio_info.chan_db_addr,
+                                chan, db_value);
         }
         mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
         spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
@@ -45,9 +46,10 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
                 if (VALID_CHAN_NR(i)) {
                         local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
                         if (IS_HARDWARE_CHANNEL(i))
-                                mhi_dev_ctxt->db_mode[i] = 1;
+                                mhi_dev_ctxt->flags.db_mode[i] = 1;
                         if ((local_ctxt->wp != local_ctxt->rp) ||
-                                ((local_ctxt->wp != local_ctxt->rp) && (i % 2)))
+                                ((local_ctxt->wp != local_ctxt->rp) &&
+                                 (local_ctxt->dir == MHI_IN)))
                                 conditional_chan_db_write(mhi_dev_ctxt, i);
                 }
         }
@@ -69,11 +71,13 @@ static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
         db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
                 (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
         if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
-                mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
+                mhi_process_db(mhi_dev_ctxt,
+                                mhi_dev_ctxt->mmio_info.cmd_db_addr,
                                 0, db_value);
         mhi_dev_ctxt->cmd_ring_order = 0;
         mutex_unlock(cmd_mutex);
 }
+
 static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
         u32 i;
@@ -85,12 +89,11 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
         unsigned long flags;
 
         mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;
-        for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+        for (i = 0; i < NR_EV_RINGS; ++i) {
                 event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
                 lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[event_ring_index];
                 mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0;
-
                 spin_lock_irqsave(lock, flags);
                 event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index];
                 db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
@@ -99,7 +102,7 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
                 if (0 == mhi_dev_ctxt->mhi_ev_db_order[event_ring_index]) {
                         mhi_process_db(mhi_dev_ctxt,
-                                        mhi_dev_ctxt->event_db_addr,
+                                        mhi_dev_ctxt->mmio_info.event_db_addr,
                                         event_ring_index, db_value);
                 }
                 mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0;
@@ -153,7 +156,7 @@ static enum MHI_STATUS process_m0_transition(
 		atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
 		atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
 	}
-	wake_up_interruptible(mhi_dev_ctxt->M0_event);
+	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
 	if (ret_val == -ERESTARTSYS)
 		mhi_log(MHI_MSG_CRITICAL,
 			"Pending restart detected\n");
@@ -181,15 +184,15 @@ static enum MHI_STATUS process_m1_transition(
 	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 	if (!mhi_dev_ctxt->flags.pending_M3) {
 		mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
-		atomic_inc(&mhi_dev_ctxt->m2_transition);
+		atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
 		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
 		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
 		mhi_reg_write_field(mhi_dev_ctxt,
-			mhi_dev_ctxt->mmio_addr, MHICTRL,
+			mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
 			MHICTRL_MHISTATE_MASK,
 			MHICTRL_MHISTATE_SHIFT,
 			MHI_STATE_M2);
-		mhi_reg_read(mhi_dev_ctxt->mmio_addr, MHICTRL);
+		mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL);
 		mhi_dev_ctxt->counters.m1_m2++;
 	}
 	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
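mhi_reg_write_field() behaves as a masked read-modify-write on a 32-bit MMIO register; a sketch inferred from the call sites above (signature assumed, device-context argument dropped for brevity):

	/* Masked field update: clear the field, then OR in the shifted
	 * value, and write the register back. */
	static void example_write_field(void __iomem *base, u32 offset,
					u32 mask, u32 shift, u32 val)
	{
		u32 reg = readl(base + offset);

		reg &= ~mask;
		reg |= (val << shift) & mask;
		writel(reg, base + offset);
	}

The mhi_reg_read() of MHICTRL straight after the write most likely serves as a read-back flush, forcing the posted MMIO write to reach the device before the code treats M2 as entered.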
@@ -216,7 +219,7 @@ static enum MHI_STATUS process_m1_transition(
 				"Failed to remove counter ret %d\n", r);
 		}
 	}
-	atomic_set(&mhi_dev_ctxt->m2_transition, 0);
+	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
 	mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
 	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
@@ -233,7 +236,7 @@ static enum MHI_STATUS process_m3_transition(
 	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
 	mhi_dev_ctxt->flags.pending_M3 = 0;
-	wake_up_interruptible(mhi_dev_ctxt->M3_event);
+	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
 	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
 	mhi_dev_ctxt->counters.m0_m3++;
 	return MHI_STATUS_SUCCESS;
@@ -256,12 +259,12 @@ static enum MHI_STATUS mhi_process_link_down(
 	mhi_dev_ctxt->flags.stop_threads = 1;
-	while (!mhi_dev_ctxt->ev_thread_stopped) {
-		wake_up_interruptible(mhi_dev_ctxt->event_handle);
+	while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
+		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
 		mhi_log(MHI_MSG_INFO,
 			"Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
-			mhi_dev_ctxt->st_thread_stopped,
-			mhi_dev_ctxt->ev_thread_stopped);
+			mhi_dev_ctxt->flags.st_thread_stopped,
+			mhi_dev_ctxt->flags.ev_thread_stopped);
 		msleep(20);
 	}
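The flag pair only works if the threads cooperate: each worker has to notice stop_threads, advertise that it has parked, and idle until restart. A sketch of that consumer half (illustrative, not the driver's event thread verbatim):

	static int example_ev_thread(void *ctxt)
	{
		struct mhi_device_ctxt *mhi_dev_ctxt = ctxt;

		while (!kthread_should_stop()) {
			if (mhi_dev_ctxt->flags.stop_threads) {
				/* park and tell the link-down path so */
				mhi_dev_ctxt->flags.ev_thread_stopped = 1;
				msleep(20);
				continue;
			}
			mhi_dev_ctxt->flags.ev_thread_stopped = 0;
			/* ... drain event rings ... */
		}
		return 0;
	}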
@@ -341,7 +344,7 @@ static enum MHI_STATUS process_bhi_transition(
 	mhi_turn_on_pcie_link(mhi_dev_ctxt);
 	mhi_log(MHI_MSG_INFO, "Entered\n");
 	mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
-	wake_up_interruptible(mhi_dev_ctxt->bhi_event);
+	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
 	mhi_log(MHI_MSG_INFO, "Exited\n");
 	return MHI_STATUS_SUCCESS;
 }
@@ -377,7 +380,7 @@ static enum MHI_STATUS process_ready_transition(
 	mhi_dev_ctxt->flags.stop_threads = 0;
 	mhi_reg_write_field(mhi_dev_ctxt,
-		mhi_dev_ctxt->mmio_addr, MHICTRL,
+		mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
 		MHICTRL_MHISTATE_MASK,
 		MHICTRL_MHISTATE_SHIFT,
 		MHI_STATE_M0);
@@ -477,7 +480,7 @@ static enum MHI_STATUS process_reset_transition(
 			mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
 	}
-	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
+	for (i = 0; i < NR_EV_RINGS; ++i) {
 		ev_ring_index = mhi_dev_ctxt->alloced_ev_rings[i];
 		mhi_reset_ev_ctxt(mhi_dev_ctxt, ev_ring_index);
 	}
@@ -658,13 +661,13 @@ static void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
 {
 	if (MHI_STATE_RESET == new_state) {
 		mhi_reg_write_field(mhi_dev_ctxt,
-			mhi_dev_ctxt->mmio_addr, MHICTRL,
+			mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
 			MHICTRL_RESET_MASK,
 			MHICTRL_RESET_SHIFT,
 			1);
 	} else {
 		mhi_reg_write_field(mhi_dev_ctxt,
-			mhi_dev_ctxt->mmio_addr, MHICTRL,
+			mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
 			MHICTRL_MHISTATE_MASK,
 			MHICTRL_MHISTATE_SHIFT,
 			new_state);
@@ -764,9 +767,9 @@ int mhi_state_change_thread(void *ctxt)
 	}
 	for (;;) {
 		r = wait_event_interruptible(
-				*mhi_dev_ctxt->state_change_event_handle,
+				*mhi_dev_ctxt->mhi_ev_wq.state_change_event,
 				((work_q->q_info.rp != work_q->q_info.wp) &&
-				 !mhi_dev_ctxt->st_thread_stopped));
+				 !mhi_dev_ctxt->flags.st_thread_stopped));
 		if (r) {
 			mhi_log(MHI_MSG_INFO,
 				"Caught signal %d, quitting\n", r);
@@ -778,7 +781,7 @@ int mhi_state_change_thread(void *ctxt)
 				"Caught exit signal, quitting\n");
 			return 0;
 		}
-		mhi_dev_ctxt->st_thread_stopped = 0;
+		mhi_dev_ctxt->flags.st_thread_stopped = 0;
 		spin_lock_irqsave(work_q->q_lock, flags);
 		cur_work_item = *(enum STATE_TRANSITION *)(state_change_q->rp);
 		ret_val = ctxt_del_element(&work_q->q_info, NULL);
@@ -826,7 +829,7 @@ enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
 	MHI_ASSERT(MHI_STATUS_SUCCESS == ret_val,
 			"Failed to add selement to STT workqueue\n");
 	spin_unlock_irqrestore(work_q->q_lock, flags);
-	wake_up_interruptible(mhi_dev_ctxt->state_change_event_handle);
+	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
 	return ret_val;
 }
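Taken together with the thread loop above, this is a classic single-consumer work queue: the producer appends under q_lock and wakes state_change_event; the thread sleeps until rp != wp. In outline (illustrative; ctxt_add_element is assumed as the counterpart of the ctxt_del_element seen above):

	/* producer (mhi_init_state_transition) */
	spin_lock_irqsave(work_q->q_lock, flags);
	ret_val = ctxt_add_element(&work_q->q_info, (void *)&new_state);
	spin_unlock_irqrestore(work_q->q_lock, flags);
	wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);

	/* consumer (mhi_state_change_thread) sleeps until there is work
	 * and it has not been told to stop */
	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.state_change_event,
			work_q->q_info.rp != work_q->q_info.wp &&
			!mhi_dev_ctxt->flags.st_thread_stopped);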
@@ -844,7 +847,7 @@ int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
 		"Waiting for M0 M1 or M3. Currently %d...\n",
 		mhi_dev_ctxt->mhi_state);
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
+	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
 		mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
 		mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
 		mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
@@ -936,13 +939,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 			"Triggering wake out of M2\n");
 		write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
 		mhi_dev_ctxt->flags.pending_M3 = 1;
-		if ((atomic_read(&mhi_dev_ctxt->m2_transition)) == 0) {
+		if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
 			mhi_log(MHI_MSG_INFO,
-				"M2_transition not set\n");
+				"M2 transition not set\n");
 			mhi_assert_device_wake(mhi_dev_ctxt);
 		}
 		write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-		r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
+		r = wait_event_interruptible_timeout(
+			*mhi_dev_ctxt->mhi_ev_wq.m0_event,
 			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
 			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
 			msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
@@ -999,7 +1003,7 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
 	mhi_log(MHI_MSG_INFO,
 		"Waiting for M3 completion.\n");
-	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event,
+	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
 		mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
 		msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
 	switch (r) {
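The switch on r follows the standard wait_event_interruptible_timeout() contract. A sketch of how such a switch typically dispatches (the actual case bodies lie outside this hunk):

	switch (r) {
	case 0:			/* timeout elapsed, condition still false */
		return -ETIMEDOUT;
	case -ERESTARTSYS:	/* interrupted by a signal */
		return r;
	default:		/* > 0: condition became true in time */
		break;
	}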


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,22 +18,13 @@
 #include "mhi_sys.h"
-enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_CRITICAL;
-enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_INFO;
+enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_INFO;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
 enum MHI_DEBUG_CLASS mhi_msg_class = MHI_DBG_DATA | MHI_DBG_POWER;
-enum MHI_DEBUG_LEVEL mhi_xfer_db_interval;
-module_param(mhi_xfer_db_interval, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(mhi_xfer_db_interval, "mhi xfer doorbell interval");
-enum MHI_DEBUG_LEVEL tx_mhi_intmodt = 10;
-module_param(tx_mhi_intmodt, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx_mhi_intmodt, "xfer interrupt modulation");
-enum MHI_DEBUG_LEVEL rx_mhi_intmodt = 6;
-module_param(rx_mhi_intmodt, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_mhi_intmodt, "rcver interrupt modulation");
 module_param(mhi_msg_lvl , uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
 module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
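With the doorbell-interval and interrupt-moderation knobs gone, only the two log-level parameters stay tunable at runtime, through /sys/module/<module_name>/parameters/mhi_msg_lvl and mhi_ipc_log_lvl; the exact module name depends on how the driver is built.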
@@ -73,7 +64,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
 			"chan:",
 			(unsigned int)*offp,
 			"pkts from dev:",
-			mhi_dev_ctxt->mhi_chan_cntr[*offp].pkts_xferd,
+			mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
 			"state:",
 			chan_ctxt->mhi_chan_state,
 			"p_base:",
@@ -122,10 +113,10 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
			&mhi_devices.device_list[0].mhi_ctxt;
 	if (NULL == mhi_dev_ctxt)
 		return -EIO;
-	*offp = (u32)(*offp) % EVENT_RINGS_ALLOCATED;
+	*offp = (u32)(*offp) % NR_EV_RINGS;
 	event_ring_index = mhi_dev_ctxt->alloced_ev_rings[*offp];
 	ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[event_ring_index];
-	if (*offp == (EVENT_RINGS_ALLOCATED - 1))
+	if (*offp == (NR_EV_RINGS - 1))
 		msleep(1000);
 	get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index],
@@ -153,7 +144,7 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
 			"MSI Vector",
 			ev_ctxt->mhi_msi_vector,
 			"MSI RX Count",
-			mhi_dev_ctxt->msi_counter[*offp],
+			mhi_dev_ctxt->counters.msi_counter[*offp],
 			"p_base:",
 			ev_ctxt->mhi_event_ring_base_addr,
 			"p_rp:",