Merge "mhi: core: add support for priority based event processing"

Linux Build Service Account 2017-03-16 20:02:53 -07:00 committed by Gerrit - the friendly Code Review server
commit 1e266f61af
10 changed files with 283 additions and 350 deletions

View file

@ -88,12 +88,13 @@ Main node properties:
Usage: required
Value type: Array of <u32>
Definition: mhi event ring configuration parameters for platform
defined as below <A B C D E>:
defined as below <A B C D E F>:
A = maximum event descriptors
B = MSI associated with event
C = interrupt moderation (see MHI specification)
D = Associated channel
E = flags defined by mhi_macros.h GET_EV_PROPS
E = priority of the event ring, 0 being the highest
F = flags defined by mhi_macros.h GET_EV_PROPS
- qcom,mhi-address-window
Usage: required
@ -120,6 +121,13 @@ Main node properties:
Value type: <u32>
Definition: Segment size in bytes for each segment.
- qcom,mhi-bb-required
Usage: optional
Value type: bool
Definition: Determines whether the MHI device requires bounce buffers
during active transfers. If true, the host will pre-allocate
transfer buffers during channel open.
========
Example:
========
@ -140,5 +148,5 @@ mhi: qcom,mhi {
<100 512 1200000000 1200000000>;
mhi-event-rings = <1>;
mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62>;
mhi-event-cfg-0 = <0x80 0x0 0x0 0x0 0x11>;
mhi-event-cfg-0 = <0x80 0x0 0x0 0x0 0x1 0x11>;
};
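For orientation, here is a minimal sketch of how the widened tuple maps onto the driver's per-ring configuration. Field names follow the mhi_event_ring_cfg additions later in this commit; np and props are assumed to be in scope, and the real parsing lives in mhi_populate_event_cfg below.

/* Sketch only: mirrors the 6-element layout <A B C D E F> described above. */
u32 cfg[6];

if (!of_property_read_u32_array(np, "mhi-event-cfg-0", cfg, 6)) {
        props->nr_desc  = cfg[0]; /* A: maximum event descriptors */
        props->msi_vec  = cfg[1]; /* B: MSI vector */
        props->intmod   = cfg[2]; /* C: interrupt moderation */
        props->chan     = cfg[3]; /* D: associated channel */
        props->priority = cfg[4]; /* E: 0 = highest priority */
        props->flags    = cfg[5]; /* F: GET_EV_PROPS flags */
}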

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -348,6 +348,11 @@ struct mhi_ring {
u32 msi_disable_cntr;
u32 msi_enable_cntr;
spinlock_t ring_lock;
struct dma_pool *dma_pool;
struct tasklet_struct ev_task;
struct work_struct ev_worker;
struct mhi_device_ctxt *mhi_dev_ctxt;
int index;
};
enum MHI_CMD_STATUS {
@ -446,9 +451,12 @@ struct mhi_state_work_queue {
struct mhi_buf_info {
dma_addr_t bb_p_addr;
dma_addr_t pre_alloc_p_addr;
void *bb_v_addr;
void *pre_alloc_v_addr;
void *client_buf;
size_t buf_len;
size_t pre_alloc_len;
size_t filled_size;
enum dma_data_direction dir;
int bb_active;
@ -465,7 +473,6 @@ struct mhi_counters {
u32 bb_used[MHI_MAX_CHANNELS];
atomic_t device_wake;
atomic_t outbound_acks;
atomic_t events_pending;
u32 *msi_counter;
u32 mhi_reset_cntr;
u32 link_down_cntr;
@ -475,15 +482,10 @@ struct mhi_counters {
struct mhi_flags {
u32 mhi_initialized;
u32 link_up;
int stop_threads;
u32 kill_threads;
u32 ev_thread_stopped;
u32 st_thread_stopped;
bool bb_required;
};
struct mhi_wait_queues {
wait_queue_head_t *mhi_event_wq;
wait_queue_head_t *state_change_event;
wait_queue_head_t *m0_event;
wait_queue_head_t *m3_event;
wait_queue_head_t *bhi_event;
@ -542,9 +544,7 @@ struct mhi_device_ctxt {
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
struct task_struct *event_thread_handle;
struct task_struct *st_thread_handle;
struct tasklet_struct ev_task; /* Process control Events */
struct work_struct st_thread_worker;
struct work_struct process_m1_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
@ -604,10 +604,18 @@ struct mhi_event_ring_cfg {
u32 intmod;
enum MHI_CLIENT_CHANNEL chan;
u32 flags;
/*
* Priority of event handling:
* 0 = highest, events are handled in the ISR (reserved for future use)
* 1 = events are handled via tasklet
* 2 = events are handled via worker thread
*/
u32 priority;
enum MHI_RING_CLASS class;
enum MHI_EVENT_RING_STATE state;
irqreturn_t (*mhi_handler_ptr)(int, void *);
};
#define MHI_EV_PRIORITY_TASKLET (1)
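The define above is the dispatch threshold; a simplified sketch of how the MSI handler (shown in full later in this diff) consults it:

/* Sketch: rings at priority 0 or 1 drain in a tasklet, others in a work item. */
if (ring_props->priority <= MHI_EV_PRIORITY_TASKLET)
        tasklet_schedule(&mhi_ring->ev_task);
else
        schedule_work(&mhi_ring->ev_worker);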
struct mhi_data_buf {
dma_addr_t bounce_buffer;
@ -665,14 +673,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt **cmd_pkt, u32 event_index);
int parse_cmd_event(struct mhi_device_ctxt *ctxt,
union mhi_event_pkt *event, u32 event_index);
int parse_event_thread(void *ctxt);
int mhi_test_for_device_ready(
struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_test_for_device_reset(
struct mhi_device_ctxt *mhi_dev_ctxt);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
int mhi_state_change_thread(void *ctxt);
void mhi_state_change_worker(struct work_struct *work);
int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt);
@ -746,6 +753,9 @@ int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state);
const char *state_transition_str(enum STATE_TRANSITION state);
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
void process_event_ring(struct work_struct *work);
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -39,7 +39,7 @@ static int bhi_alloc_bhie_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
const u32 align = bhi_ctxt->alignment - 1;
const phys_addr_t align = bhi_ctxt->alignment - 1;
size_t seg_size = bhi_ctxt->firmware_info.segment_size;
/* We need one additional entry for Vector Table */
int segments = DIV_ROUND_UP(size, seg_size) + 1;
@ -112,7 +112,7 @@ static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
size_t size)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
const u32 align_len = bhi_ctxt->alignment;
const phys_addr_t align_len = bhi_ctxt->alignment;
size_t alloc_size = size + (align_len - 1);
struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
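The u32 to phys_addr_t change is more than cosmetic: on targets with 64-bit physical addressing, building the alignment mask in a u32 would truncate the upper address bits. A minimal sketch of the arithmetic these variables feed, assuming alignment is a power of two and unaligned holds the raw DMA address:

/* Sketch: round a DMA address up to bhi_ctxt->alignment. With a u32
 * mask, ~align would clear bits 32+ of a 64-bit address.
 */
const phys_addr_t align = bhi_ctxt->alignment - 1;
dma_addr_t aligned = (unaligned + align) & ~align;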

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -43,13 +43,13 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
return -ENOMEM;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
u32 dt_configs[5];
int len;
u32 dt_configs[6];
int no_elements;
scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-event-cfg-", i);
if (!of_find_property(np, dt_prop, &len))
goto dt_error;
if (len != sizeof(dt_configs))
no_elements = of_property_count_elems_of_size(np, dt_prop,
sizeof(dt_configs));
if (no_elements != 1)
goto dt_error;
r = of_property_read_u32_array(
np,
@ -66,14 +66,16 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->ev_ring_props[i].msi_vec = dt_configs[1];
mhi_dev_ctxt->ev_ring_props[i].intmod = dt_configs[2];
mhi_dev_ctxt->ev_ring_props[i].chan = dt_configs[3];
mhi_dev_ctxt->ev_ring_props[i].flags = dt_configs[4];
mhi_dev_ctxt->ev_ring_props[i].priority = dt_configs[4];
mhi_dev_ctxt->ev_ring_props[i].flags = dt_configs[5];
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"ev ring %d,desc:0x%x,msi:0x%x,intmod%d chan:%u flags0x%x\n",
"ev ring %d,desc:0x%x,msi:0x%x,intmod%d chan:%u priority:%u flags0x%x\n",
i,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
mhi_dev_ctxt->ev_ring_props[i].msi_vec,
mhi_dev_ctxt->ev_ring_props[i].intmod,
mhi_dev_ctxt->ev_ring_props[i].chan,
mhi_dev_ctxt->ev_ring_props[i].priority,
mhi_dev_ctxt->ev_ring_props[i].flags);
if (GET_EV_PROPS(EV_MANAGED,
mhi_dev_ctxt->ev_ring_props[i].flags))
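Note that of_property_count_elems_of_size() is passed the size of the whole six-word array, so a well-formed property counts as exactly one element. It is roughly equivalent to the explicit length check it replaces:

/* Sketch of the check being replaced, for comparison. */
int len;

if (!of_find_property(np, dt_prop, &len) || len != 6 * sizeof(u32))
        goto dt_error; /* property must be exactly one <A B C D E F> tuple */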
@ -128,6 +130,9 @@ int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_local_event_ctxt[i];
spin_lock_init(&mhi_ring->ring_lock);
tasklet_init(&mhi_ring->ev_task, mhi_ev_task,
(unsigned long)mhi_ring);
INIT_WORK(&mhi_ring->ev_worker, process_event_ring);
}
return r;
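Because each tasklet and work item is seeded with its own struct mhi_ring, the deferred handlers can recover everything from that one pointer. A sketch of the pattern, with hypothetical names (the real handlers are mhi_ev_task and process_event_ring later in this diff):

static void ev_tasklet(unsigned long data)
{
        struct mhi_ring *ring = (struct mhi_ring *)data;
        /* ring->mhi_dev_ctxt and ring->index identify the event ring */
}

static void ev_work(struct work_struct *work)
{
        struct mhi_ring *ring = container_of(work, struct mhi_ring, ev_worker);
        /* same context, recovered from the embedded work_struct */
}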
@ -157,6 +162,8 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
struct mhi_ring *ring,
struct mhi_device_ctxt *mhi_dev_ctxt,
int index,
u32 el_per_ring,
u32 intmodt_val,
u32 msi_vec,
@ -166,6 +173,8 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
ev_list->mhi_msi_vector = msi_vec;
ev_list->mhi_event_ring_len = el_per_ring*sizeof(union mhi_event_pkt);
MHI_SET_EV_CTXT(EVENT_CTXT_INTMODT, ev_list, intmodt_val);
ring->mhi_dev_ctxt = mhi_dev_ctxt;
ring->index = index;
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
@ -198,6 +207,7 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
mhi_dev_ctxt, i,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
mhi_dev_ctxt->ev_ring_props[i].intmod,
mhi_dev_ctxt->ev_ring_props[i].msi_vec,

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -110,8 +110,6 @@ int mhi_ctxt_init(struct mhi_device_ctxt *mhi_dev_ctxt)
irq_error:
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
@ -190,12 +188,10 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
pcie_device->dev.of_node = plat_dev->dev.of_node;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
tasklet_init(&mhi_dev_ctxt->ev_task,
mhi_ctrl_ev_task,
(unsigned long)mhi_dev_ctxt);
init_completion(&mhi_dev_ctxt->cmd_complete);
mhi_dev_ctxt->flags.link_up = 1;
@ -456,6 +452,10 @@ static int mhi_plat_probe(struct platform_device *pdev)
INIT_WORK(&bhi_ctxt->fw_load_work, bhi_firmware_download);
}
mhi_dev_ctxt->flags.bb_required =
of_property_read_bool(pdev->dev.of_node,
"qcom,mhi-bb-required");
mhi_dev_ctxt->plat_dev = pdev;
platform_set_drvdata(pdev, mhi_dev_ctxt);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -250,19 +250,6 @@ err_ev_alloc:
static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc(
sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
return -ENOMEM;
}
mhi_dev_ctxt->mhi_ev_wq.state_change_event =
kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.state_change_event) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_event_handle_alloc;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.m0_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
@ -284,10 +271,7 @@ static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_bhi_event;
}
/* Initialize the event which starts the event parsing thread */
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
/* Initialize the event which starts the state change thread */
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
/* Initialize the event which triggers clients waiting to send */
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m0_event);
/* Initialize the event which triggers D3hot */
@ -300,9 +284,6 @@ error_bhi_event:
error_m0_event:
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
error_state_change_event_handle:
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
error_event_handle_alloc:
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
return -ENOMEM;
}
@ -339,21 +320,6 @@ static void mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt)
wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source");
}
static int mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
{
mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread,
mhi_dev_ctxt,
"mhi_ev_thrd");
if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
return PTR_ERR(mhi_dev_ctxt->event_thread_handle);
mhi_dev_ctxt->st_thread_handle = kthread_run(mhi_state_change_thread,
mhi_dev_ctxt,
"mhi_st_thrd");
if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
return PTR_ERR(mhi_dev_ctxt->event_thread_handle);
return 0;
}
/**
* @brief Main initialization function for a mhi struct device context
* All threads, events mutexes, mhi specific data structures
@ -408,21 +374,11 @@ int mhi_init_device_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to spawn threads ret %d\n", r);
goto error_during_thread_spawn;
}
mhi_init_wakelock(mhi_dev_ctxt);
return r;
error_during_thread_spawn:
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
error_during_thread_init:
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -125,8 +125,11 @@ static int mhi_process_event_ring(
"MHI STE received ring 0x%x State:%s\n",
ev_index, state_transition_str(new_state));
/* Handle M0/M1/M3 state transitions; M1 schedules a worker thread */
if (new_state == STATE_TRANSITION_M1) {
switch (new_state) {
case STATE_TRANSITION_M0:
process_m0_transition(mhi_dev_ctxt);
break;
case STATE_TRANSITION_M1:
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
flags);
mhi_dev_ctxt->mhi_state =
@ -140,9 +143,15 @@ static int mhi_process_event_ring(
write_unlock_irqrestore(&mhi_dev_ctxt->
pm_xfer_lock,
flags);
} else {
mhi_init_state_transition(mhi_dev_ctxt,
new_state);
break;
case STATE_TRANSITION_M3:
process_m3_transition(mhi_dev_ctxt);
break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported STE received ring 0x%x State:%s\n",
ev_index,
state_transition_str(new_state));
}
break;
}
@ -207,84 +216,39 @@ static int mhi_process_event_ring(
return ret_val;
}
int parse_event_thread(void *ctxt)
{
struct mhi_device_ctxt *mhi_dev_ctxt = ctxt;
u32 i = 0;
int ret_val = 0;
int ret_val_process_event = 0;
atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
/* Go through all event rings */
for (;;) {
ret_val =
wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
((atomic_read(
&mhi_dev_ctxt->counters.events_pending) > 0) &&
!mhi_dev_ctxt->flags.stop_threads) ||
mhi_dev_ctxt->flags.kill_threads ||
(mhi_dev_ctxt->flags.stop_threads &&
!mhi_dev_ctxt->flags.ev_thread_stopped));
switch (ret_val) {
case -ERESTARTSYS:
return 0;
default:
if (mhi_dev_ctxt->flags.kill_threads) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught exit signal, quitting\n");
return 0;
}
if (mhi_dev_ctxt->flags.stop_threads) {
mhi_dev_ctxt->flags.ev_thread_stopped = 1;
continue;
}
break;
}
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "awake\n");
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
atomic_dec(&mhi_dev_ctxt->counters.events_pending);
for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"SYS_ERR detected, not processing events\n");
atomic_set(&mhi_dev_ctxt->
counters.events_pending,
0);
break;
}
if (GET_EV_PROPS(EV_MANAGED,
mhi_dev_ctxt->ev_ring_props[i].flags)) {
ret_val_process_event =
mhi_process_event_ring(mhi_dev_ctxt,
i,
mhi_dev_ctxt->
ev_ring_props[i].nr_desc);
if (ret_val_process_event == -EINPROGRESS)
atomic_inc(ev_pen_ptr);
}
}
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "sleep\n");
}
}
void mhi_ctrl_ev_task(unsigned long data)
void mhi_ev_task(unsigned long data)
{
struct mhi_ring *mhi_ring = (struct mhi_ring *)data;
struct mhi_device_ctxt *mhi_dev_ctxt =
(struct mhi_device_ctxt *)data;
const unsigned CTRL_EV_RING = 0;
mhi_ring->mhi_dev_ctxt;
int ev_index = mhi_ring->index;
struct mhi_event_ring_cfg *ring_props =
&mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
&mhi_dev_ctxt->ev_ring_props[ev_index];
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
/* Process control event ring */
mhi_process_event_ring(mhi_dev_ctxt,
CTRL_EV_RING,
ring_props->nr_desc);
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
/* Process event ring */
mhi_process_event_ring(mhi_dev_ctxt, ev_index, ring_props->nr_desc);
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
}
void process_event_ring(struct work_struct *work)
{
struct mhi_ring *mhi_ring =
container_of(work, struct mhi_ring, ev_worker);
struct mhi_device_ctxt *mhi_dev_ctxt =
mhi_ring->mhi_dev_ctxt;
int ev_index = mhi_ring->index;
struct mhi_event_ring_cfg *ring_props =
&mhi_dev_ctxt->ev_ring_props[ev_index];
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
/* Process event ring */
mhi_process_event_ring(mhi_dev_ctxt, ev_index, ring_props->nr_desc);
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@ -332,19 +296,19 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
{
struct mhi_device_ctxt *mhi_dev_ctxt = dev_id;
int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[msi];
struct mhi_event_ring_cfg *ring_props =
&mhi_dev_ctxt->ev_ring_props[msi];
mhi_dev_ctxt->counters.msi_counter[
IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
if (msi) {
atomic_inc(&mhi_dev_ctxt->counters.events_pending);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
} else {
disable_irq_nosync(irq_number);
tasklet_schedule(&mhi_dev_ctxt->ev_task);
}
disable_irq_nosync(irq_number);
if (ring_props->priority <= MHI_EV_PRIORITY_TASKLET)
tasklet_schedule(&mhi_ring->ev_task);
else
schedule_work(&mhi_ring->ev_worker);
return IRQ_HANDLED;
}
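Also note the IRQ handshake introduced here: the ISR masks its own line with disable_irq_nosync(), and the deferred handler re-enables it only after draining the ring, so the line stays quiet while events are pending. A hedged sketch of the deferred half (hypothetical helper; the tasklet and work callbacks above do this inline):

static void mhi_drain_and_rearm(struct mhi_ring *ring)
{
        struct mhi_device_ctxt *ctxt = ring->mhi_dev_ctxt;
        struct mhi_event_ring_cfg *props = &ctxt->ev_ring_props[ring->index];

        mhi_process_event_ring(ctxt, ring->index, props->nr_desc);
        enable_irq(MSI_TO_IRQ(ctxt, ring->index)); /* undo the ISR's mask */
}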

View file

@ -32,9 +32,18 @@
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt *cmd_pkt);
static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt);
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt,
int nr_el,
int chan,
size_t max_payload)
{
int i;
struct mhi_buf_info *mhi_buf_info;
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
bb_ctxt->len = bb_ctxt->el_size * nr_el;
bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
@ -43,7 +52,46 @@ static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
bb_ctxt->ack_rp = bb_ctxt->base;
if (!bb_ctxt->base)
return -ENOMEM;
if (mhi_dev_ctxt->flags.bb_required) {
char pool_name[32];
snprintf(pool_name, sizeof(pool_name), "mhi%d_%d",
mhi_dev_ctxt->plat_dev->id, chan);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Creating pool %s for chan:%d payload: 0x%lx\n",
pool_name, chan, max_payload);
bb_ctxt->dma_pool = dma_pool_create(pool_name,
&mhi_dev_ctxt->plat_dev->dev, max_payload, 0, 0);
if (unlikely(!bb_ctxt->dma_pool))
goto dma_pool_error;
mhi_buf_info = (struct mhi_buf_info *)bb_ctxt->base;
for (i = 0; i < nr_el; i++, mhi_buf_info++) {
mhi_buf_info->pre_alloc_v_addr =
dma_pool_alloc(bb_ctxt->dma_pool, GFP_KERNEL,
&mhi_buf_info->pre_alloc_p_addr);
if (unlikely(!mhi_buf_info->pre_alloc_v_addr))
goto dma_alloc_error;
mhi_buf_info->pre_alloc_len = max_payload;
}
}
return 0;
dma_alloc_error:
for (--i, --mhi_buf_info; i >= 0; i--, mhi_buf_info--)
dma_pool_free(bb_ctxt->dma_pool, mhi_buf_info->pre_alloc_v_addr,
mhi_buf_info->pre_alloc_p_addr);
dma_pool_destroy(bb_ctxt->dma_pool);
bb_ctxt->dma_pool = NULL;
dma_pool_error:
kfree(bb_ctxt->base);
bb_ctxt->base = NULL;
return -ENOMEM;
}
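For readers new to the dma_pool API used above, the lifecycle is strictly paired; a minimal sketch with hypothetical dev and max_payload values:

struct dma_pool *pool;
dma_addr_t phys;
void *virt;

pool = dma_pool_create("mhi_bb", dev, max_payload, 0, 0);
virt = dma_pool_alloc(pool, GFP_KERNEL, &phys);
/* ... bounce data through virt/phys ... */
dma_pool_free(pool, virt, phys);
dma_pool_destroy(pool); /* legal only once every allocation is freed */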
static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
@ -207,11 +255,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
void free_tre_ring(struct mhi_client_config *client_config)
void free_tre_ring(struct mhi_device_ctxt *mhi_dev_ctxt, int chan)
{
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
int chan = client_config->chan_info.chan_nr;
int r;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@ -276,11 +322,6 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
return -EINVAL;
mhi_dev_ctxt = client_config->mhi_dev_ctxt;
ret_val = get_chan_props(mhi_dev_ctxt,
client_config->chan_info.chan_nr,
&client_config->chan_info);
if (ret_val)
return ret_val;
chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
@ -302,21 +343,11 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
chan, ret_val);
mutex_unlock(&cfg->chan_lock);
return ret_val;
goto error_tre_ring;
}
client_config->event_ring_index =
mhi_dev_ctxt->dev_space.ring_ctxt.
cc_list[chan].mhi_event_ring_index;
ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
client_config->chan_info.max_desc);
if (ret_val) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize bb ctxt chan %d ret %d\n",
chan, ret_val);
mutex_unlock(&cfg->chan_lock);
return ret_val;
}
client_config->msi_vec =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
@ -331,18 +362,13 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI State is disabled\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mutex_unlock(&cfg->chan_lock);
return -EIO;
ret_val = -EIO;
goto error_pm_state;
}
WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
spin_lock_irq(&chan_ring->ring_lock);
chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
spin_unlock_irq(&chan_ring->ring_lock);
ret_val = mhi_send_cmd(client_config->mhi_dev_ctxt,
MHI_COMMAND_START_CHAN,
chan);
@ -377,16 +403,30 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
goto error_completion;
}
spin_lock_irq(&chan_ring->ring_lock);
chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
spin_unlock_irq(&chan_ring->ring_lock);
client_config->chan_status = 1;
error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mutex_unlock(&cfg->chan_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"chan:%d opened successfully\n", chan);
return 0;
error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
error_pm_state:
free_tre_ring(mhi_dev_ctxt, chan);
error_tre_ring:
mutex_unlock(&cfg->chan_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
@ -431,6 +471,7 @@ int mhi_register_channel(struct mhi_client_handle **client_handle,
struct mhi_client_config *client_config;
const char *node_name;
enum MHI_CLIENT_CHANNEL chan;
int ret;
if (!client_info || client_info->dev->of_node == NULL)
return -EINVAL;
@ -489,6 +530,17 @@ int mhi_register_channel(struct mhi_client_handle **client_handle,
if (MHI_CLIENT_IP_HW_0_IN == chan)
client_config->intmod_t = 10;
get_chan_props(mhi_dev_ctxt, chan, &client_config->chan_info);
ret = enable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan],
client_config->chan_info.max_desc, chan,
client_config->client_info.max_payload);
if (ret) {
kfree(mhi_dev_ctxt->client_handle_list[chan]->client_config);
kfree(mhi_dev_ctxt->client_handle_list[chan]);
mhi_dev_ctxt->client_handle_list[chan] = NULL;
return -ENOMEM;
}
if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS &&
mhi_dev_ctxt->flags.mhi_initialized) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
@ -531,6 +583,14 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
/* No more processing events for this channel */
spin_lock_irq(&chan_ring->ring_lock);
if (chan_ring->ch_state != MHI_CHAN_STATE_ENABLED) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan %d is not enabled, cur state:0x%x\n",
chan, chan_ring->ch_state);
spin_unlock_irq(&chan_ring->ring_lock);
mutex_unlock(&cfg->chan_lock);
return;
}
chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
spin_unlock_irq(&chan_ring->ring_lock);
init_completion(&cfg->cmd_complete);
@ -565,22 +625,30 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error to receive event completion ev_cod:0x%x\n",
ev_code);
goto error_completion;
}
error_completion:
ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
if (ret_val)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error resetting cmd ret:%d\n", ret_val);
error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"resetting bb_ring for chan 0x%x\n", chan);
mhi_dev_ctxt->chan_bb_list[chan].rp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_dev_ctxt->chan_bb_list[chan].wp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_dev_ctxt->chan_bb_list[chan].ack_rp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Freeing ring for chan 0x%x\n", chan);
free_tre_ring(client_config);
free_tre_ring(mhi_dev_ctxt, chan);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan 0x%x confirmed closed.\n", chan);
client_config->chan_status = 0;
@ -639,6 +707,7 @@ static inline int mhi_queue_tre(struct mhi_device_ctxt
}
return 0;
}
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
enum dma_data_direction dir, struct mhi_buf_info **bb)
@ -674,6 +743,7 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->client_buf,
bb_info->buf_len,
bb_info->dir);
bb_info->bb_active = 0;
if (!VALID_BUF(bb_info->bb_p_addr, bb_info->buf_len, mhi_dev_ctxt)) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Buffer outside DMA range 0x%lx, size 0x%zx\n",
@ -682,30 +752,48 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->bb_p_addr,
bb_info->buf_len,
bb_info->dir);
mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
"Allocating BB, chan %d\n", chan);
bb_info->bb_v_addr = dma_alloc_coherent(
&mhi_dev_ctxt->plat_dev->dev,
bb_info->buf_len,
&bb_info->bb_p_addr,
GFP_ATOMIC);
if (!bb_info->bb_v_addr)
return -ENOMEM;
mhi_dev_ctxt->counters.bb_used[chan]++;
if (dir == DMA_TO_DEVICE) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Copying client buf into BB.\n");
memcpy(bb_info->bb_v_addr, buf, bb_info->buf_len);
/* Flush out data to bounce buffer */
wmb();
}
bb_info->bb_active = 1;
if (likely((mhi_dev_ctxt->flags.bb_required &&
bb_info->pre_alloc_len >= bb_info->buf_len))) {
bb_info->bb_p_addr = bb_info->pre_alloc_p_addr;
bb_info->bb_v_addr = bb_info->pre_alloc_v_addr;
mhi_dev_ctxt->counters.bb_used[chan]++;
if (dir == DMA_TO_DEVICE) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Copying client buf into BB.\n");
memcpy(bb_info->bb_v_addr, buf,
bb_info->buf_len);
}
bb_info->bb_active = 1;
} else
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"No BB allocated\n");
}
*bb = bb_info;
mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited chan %d\n", chan);
return 0;
}
static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt)
{
if (mhi_dev_ctxt->flags.bb_required) {
struct mhi_buf_info *bb =
(struct mhi_buf_info *)bb_ctxt->base;
int nr_el = bb_ctxt->len / bb_ctxt->el_size;
int i = 0;
for (i = 0; i < nr_el; i++, bb++)
dma_pool_free(bb_ctxt->dma_pool, bb->pre_alloc_v_addr,
bb->pre_alloc_p_addr);
dma_pool_destroy(bb_ctxt->dma_pool);
bb_ctxt->dma_pool = NULL;
}
kfree(bb_ctxt->base);
bb_ctxt->base = NULL;
}
static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb)
{
@ -714,44 +802,11 @@ static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
/* This buffer was mapped directly to device */
dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb->bb_p_addr, bb->buf_len, bb->dir);
else
/* This buffer was bounced */
dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
bb->buf_len,
bb->bb_v_addr,
bb->bb_p_addr);
bb->bb_active = 0;
mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt)
{
int r = 0;
struct mhi_buf_info *bb = NULL;
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
/*
Assumption: No events are expected during or after
this operation is occurring for this channel.
If a bounce buffer was allocated, the coherent memory is
expected to be already freed.
If the user's bounce buffer was mapped, it is expected to be
already unmapped.
Failure of any of the above conditions will result in
a memory leak or subtle memory corruption.
*/
while (!r) {
r = ctxt_del_element(bb_ctxt, (void **)&bb);
if (bb)
free_bounce_buffer(mhi_dev_ctxt, bb);
}
bb_ctxt->ack_rp = bb_ctxt->base;
bb_ctxt->rp = bb_ctxt->base;
bb_ctxt->wp = bb_ctxt->base;
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exited\n");
}
static int mhi_queue_dma_xfer(
struct mhi_client_config *client_config,
dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
@ -1342,7 +1397,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *ev_ctxt = NULL;
int pending_el = 0, i;
struct mhi_ring *bb_ctxt;
unsigned long flags;
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
@ -1355,8 +1409,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
return -EINVAL;
}
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
@ -1426,9 +1478,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Cleaning up BB list\n");
reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
return ret_val;
}
@ -1672,14 +1721,17 @@ int mhi_deregister_channel(struct mhi_client_handle *client_handle)
int ret_val = 0;
int chan;
struct mhi_client_config *client_config;
struct mhi_device_ctxt *mhi_dev_ctxt;
if (!client_handle)
return -EINVAL;
client_config = client_handle->client_config;
mhi_dev_ctxt = client_config->mhi_dev_ctxt;
chan = client_config->chan_info.chan_nr;
client_config->magic = 0;
client_config->mhi_dev_ctxt->client_handle_list[chan] = NULL;
mhi_dev_ctxt->client_handle_list[chan] = NULL;
disable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan]);
kfree(client_config);
kfree(client_handle);
return ret_val;
@ -1736,11 +1788,10 @@ int mhi_register_device(struct mhi_device *mhi_device,
mhi_dev_ctxt->pcie_device = pci_dev;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
tasklet_init(&mhi_dev_ctxt->ev_task, mhi_ctrl_ev_task,
(unsigned long)mhi_dev_ctxt);
init_completion(&mhi_dev_ctxt->cmd_complete);
mhi_dev_ctxt->flags.link_up = 1;
core_info = &mhi_dev_ctxt->core;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -139,6 +139,13 @@ int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
/* confirm it's a valid reading */
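/* an all-1s value typically indicates a dead PCIe link or failed read */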
if (unlikely(pcie_word_val == U32_MAX)) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Invalid BHI Offset:0x%x\n", pcie_word_val);
return -EIO;
}
mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
BHI_EXECENV);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -169,10 +169,9 @@ static int process_bhie_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
static int process_m0_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered With State %s\n",
@ -189,10 +188,10 @@ static int process_m0_transition(
break;
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
@ -266,11 +265,9 @@ void process_m1_transition(struct work_struct *work)
mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
static int process_m3_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@ -286,34 +283,14 @@ static int process_m3_transition(
break;
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
static int process_link_down_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
}
static int process_wake_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
}
static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
@ -364,7 +341,6 @@ static int process_ready_transition(
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->flags.stop_threads = 0;
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
@ -439,16 +415,6 @@ static int process_reset_transition(
return r;
}
static int process_syserr_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
}
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_EXEC_ENV exec_env)
{
@ -510,9 +476,6 @@ static int process_amss_transition(
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
return r;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_chan_dbs(mhi_dev_ctxt, true);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
@ -521,9 +484,6 @@ static int process_amss_transition(
"MHI is initialized\n");
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_ev_dbs(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
complete(&mhi_dev_ctxt->cmd_complete);
/*
@ -574,23 +534,6 @@ static int process_stt_work_item(
case STATE_TRANSITION_AMSS:
r = process_amss_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_M0:
r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_M3:
r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_SYS_ERR:
r = process_syserr_transition(mhi_dev_ctxt,
cur_work_item);
break;
case STATE_TRANSITION_LINK_DOWN:
r = process_link_down_transition(mhi_dev_ctxt,
cur_work_item);
break;
case STATE_TRANSITION_WAKE:
r = process_wake_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_BHIE:
r = process_bhie_transition(mhi_dev_ctxt, cur_work_item);
break;
@ -603,42 +546,26 @@ static int process_stt_work_item(
return r;
}
int mhi_state_change_thread(void *ctxt)
void mhi_state_change_worker(struct work_struct *work)
{
int r = 0;
unsigned long flags = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt;
struct mhi_device_ctxt *mhi_dev_ctxt = container_of(work,
struct mhi_device_ctxt,
st_thread_worker);
enum STATE_TRANSITION cur_work_item;
struct mhi_state_work_queue *work_q =
&mhi_dev_ctxt->state_change_work_item_list;
struct mhi_ring *state_change_q = &work_q->q_info;
for (;;) {
r = wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.state_change_event,
((work_q->q_info.rp != work_q->q_info.wp) &&
!mhi_dev_ctxt->flags.st_thread_stopped));
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught signal %d, quitting\n", r);
return 0;
}
if (mhi_dev_ctxt->flags.kill_threads) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught exit signal, quitting\n");
return 0;
}
mhi_dev_ctxt->flags.st_thread_stopped = 0;
spin_lock_irqsave(work_q->q_lock, flags);
while (work_q->q_info.rp != work_q->q_info.wp) {
spin_lock_irq(work_q->q_lock);
cur_work_item = *(enum STATE_TRANSITION *)(state_change_q->rp);
r = ctxt_del_element(&work_q->q_info, NULL);
MHI_ASSERT(r == 0,
"Failed to delete element from STT workqueue\n");
spin_unlock_irqrestore(work_q->q_lock, flags);
spin_unlock_irq(work_q->q_lock);
r = process_stt_work_item(mhi_dev_ctxt, cur_work_item);
}
return 0;
}
/**
@ -673,6 +600,6 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
spin_unlock_irqrestore(work_q->q_lock, flags);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
schedule_work(&mhi_dev_ctxt->st_thread_worker);
return r;
}
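A final subtlety of the kthread-to-workqueue conversion: schedule_work() is a no-op while the item is already queued, which is only safe because the worker drains the whole queue on each invocation. A minimal model of the pattern, with a hypothetical pop helper:

static void st_worker(struct work_struct *work)
{
        struct mhi_device_ctxt *ctxt =
                container_of(work, struct mhi_device_ctxt, st_thread_worker);
        enum STATE_TRANSITION item;

        /* Coalesced schedule_work() calls are safe: every queued
         * transition is consumed here, not one per wakeup.
         */
        while (st_queue_pop(ctxt, &item)) /* hypothetical pop helper */
                process_stt_work_item(ctxt, item);
}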