mhi: core: add support for bounce buffer transfer

Not all MHI-capable devices can access the entire host
DDR. If an MHI client provides a transfer packet that is
outside the supported address range, copy the packet to
a local bounce buffer prior to transfer.
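
In outline, the data path this change adds behaves as in the
following sketch. This is simplified and hypothetical, not the
driver's exact code: mhi_addr_valid(), DMA_WIN_START/DMA_WIN_END and
struct bounce_buf stand in for the real range check and bookkeeping,
and the bounce buffer is assumed to have been pre-allocated.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

#define DMA_WIN_START 0x00000000ULL     /* hypothetical device window */
#define DMA_WIN_END   0x80000000ULL

struct bounce_buf {                     /* hypothetical, simplified */
        void *pre_alloc_virt;           /* pre-allocated bounce memory */
        dma_addr_t pre_alloc_dma;
        size_t pre_alloc_len;
        dma_addr_t dma;                 /* address handed to the device */
};

/* Device-specific window check; bounds are hypothetical. */
static bool mhi_addr_valid(dma_addr_t addr, size_t len)
{
        return addr >= DMA_WIN_START && addr + len <= DMA_WIN_END;
}

static int map_or_bounce(struct device *dev, struct bounce_buf *bb,
                         void *buf, size_t len)
{
        bb->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, bb->dma))
                return -ENOMEM;

        if (mhi_addr_valid(bb->dma, len))
                return 0;               /* device can DMA directly */

        /* Outside the device's window: bounce the payload instead. */
        dma_unmap_single(dev, bb->dma, len, DMA_TO_DEVICE);
        if (len > bb->pre_alloc_len)
                return -ENOMEM;
        memcpy(bb->pre_alloc_virt, buf, len);
        bb->dma = bb->pre_alloc_dma;
        return 0;
}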

CRs-Fixed: 1110280
Change-Id: Ie7292a2c89cc6608d5360f8a330d78635d913c29
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>

Author: Sujeev Dias <sdias@codeaurora.org>
Date:   2017-01-19 12:19:55 -08:00
commit 5ce3f182b8 (parent 75edc331a7)
4 changed files with 161 additions and 93 deletions


@@ -120,6 +120,13 @@ Main node properties:
Value type: <u32>
Definition: Segment size in bytes for each segment.
- qcom,mhi-bb-required
Usage: optional
Value type: bool
Definition: Determines whether the MHI device requires bounce
buffers during active transfers. If true, the host will
pre-allocate transfer buffers when a channel is opened.
========
Example:
========


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -348,6 +348,7 @@ struct mhi_ring {
u32 msi_disable_cntr;
u32 msi_enable_cntr;
spinlock_t ring_lock;
struct dma_pool *dma_pool;
};
enum MHI_CMD_STATUS {
@@ -446,9 +447,12 @@ struct mhi_state_work_queue {
struct mhi_buf_info {
dma_addr_t bb_p_addr;
dma_addr_t pre_alloc_p_addr;
void *bb_v_addr;
void *pre_alloc_v_addr;
void *client_buf;
size_t buf_len;
size_t pre_alloc_len;
size_t filled_size;
enum dma_data_direction dir;
int bb_active;
@@ -479,6 +483,7 @@ struct mhi_flags {
u32 kill_threads;
u32 ev_thread_stopped;
u32 st_thread_stopped;
bool bb_required;
};
struct mhi_wait_queues {


@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -456,6 +456,10 @@ static int mhi_plat_probe(struct platform_device *pdev)
INIT_WORK(&bhi_ctxt->fw_load_work, bhi_firmware_download);
}
mhi_dev_ctxt->flags.bb_required =
of_property_read_bool(pdev->dev.of_node,
"qcom,mhi-bb-required");
mhi_dev_ctxt->plat_dev = pdev;
platform_set_drvdata(pdev, mhi_dev_ctxt);


@@ -32,9 +32,18 @@
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt *cmd_pkt);
static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt);
-static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
+static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+struct mhi_ring *bb_ctxt,
+int nr_el,
+int chan,
+size_t max_payload)
{
int i;
struct mhi_buf_info *mhi_buf_info;
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
bb_ctxt->len = bb_ctxt->el_size * nr_el;
bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
@@ -43,7 +52,46 @@ static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_ctxt->ack_rp = bb_ctxt->base;
if (!bb_ctxt->base)
return -ENOMEM;
if (mhi_dev_ctxt->flags.bb_required) {
char pool_name[32];
snprintf(pool_name, sizeof(pool_name), "mhi%d_%d",
mhi_dev_ctxt->plat_dev->id, chan);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Creating pool %s for chan:%d payload: 0x%lx\n",
pool_name, chan, max_payload);
bb_ctxt->dma_pool = dma_pool_create(pool_name,
&mhi_dev_ctxt->plat_dev->dev, max_payload, 0, 0);
if (unlikely(!bb_ctxt->dma_pool))
goto dma_pool_error;
mhi_buf_info = (struct mhi_buf_info *)bb_ctxt->base;
for (i = 0; i < nr_el; i++, mhi_buf_info++) {
mhi_buf_info->pre_alloc_v_addr =
dma_pool_alloc(bb_ctxt->dma_pool, GFP_KERNEL,
&mhi_buf_info->pre_alloc_p_addr);
if (unlikely(!mhi_buf_info->pre_alloc_v_addr))
goto dma_alloc_error;
mhi_buf_info->pre_alloc_len = max_payload;
}
}
return 0;
dma_alloc_error:
for (--i, --mhi_buf_info; i >= 0; i--, mhi_buf_info--)
dma_pool_free(bb_ctxt->dma_pool, mhi_buf_info->pre_alloc_v_addr,
mhi_buf_info->pre_alloc_p_addr);
dma_pool_destroy(bb_ctxt->dma_pool);
bb_ctxt->dma_pool = NULL;
dma_pool_error:
kfree(bb_ctxt->base);
bb_ctxt->base = NULL;
return -ENOMEM;
}
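
For reference, the per-channel pool above follows the standard
dma_pool create/alloc/free/destroy lifecycle. A minimal standalone
sketch, assuming a hypothetical device pointer and a 4 KiB block size:

#include <linux/dmapool.h>
#include <linux/sizes.h>

static int pool_lifecycle_example(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        /* name, device, block size, alignment (0 = default), boundary */
        pool = dma_pool_create("mhi_example", dev, SZ_4K, 0, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (!vaddr) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }

        /* ... hardware DMAs via dma, the CPU writes via vaddr ... */

        dma_pool_free(pool, vaddr, dma);        /* free every block */
        dma_pool_destroy(pool);                 /* then drop the pool */
        return 0;
}

Note the ordering: every block must be returned with dma_pool_free()
before dma_pool_destroy(), which is exactly what the dma_alloc_error
unwind path above does.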
static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -207,11 +255,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
-void free_tre_ring(struct mhi_client_config *client_config)
+void free_tre_ring(struct mhi_device_ctxt *mhi_dev_ctxt, int chan)
{
struct mhi_chan_ctxt *chan_ctxt;
-struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
-int chan = client_config->chan_info.chan_nr;
int r;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -276,11 +322,6 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
return -EINVAL;
mhi_dev_ctxt = client_config->mhi_dev_ctxt;
-ret_val = get_chan_props(mhi_dev_ctxt,
-client_config->chan_info.chan_nr,
-&client_config->chan_info);
-if (ret_val)
-return ret_val;
chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
@@ -302,21 +343,11 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
chan, ret_val);
-mutex_unlock(&cfg->chan_lock);
-return ret_val;
+goto error_tre_ring;
}
client_config->event_ring_index =
mhi_dev_ctxt->dev_space.ring_ctxt.
cc_list[chan].mhi_event_ring_index;
-ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
-client_config->chan_info.max_desc);
-if (ret_val) {
-mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
-"Failed to initialize bb ctxt chan %d ret %d\n",
-chan, ret_val);
-mutex_unlock(&cfg->chan_lock);
-return ret_val;
-}
client_config->msi_vec =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
@@ -331,18 +362,13 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI State is disabled\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
-mutex_unlock(&cfg->chan_lock);
-return -EIO;
+ret_val = -EIO;
+goto error_pm_state;
}
WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
-spin_lock_irq(&chan_ring->ring_lock);
-chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
-spin_unlock_irq(&chan_ring->ring_lock);
ret_val = mhi_send_cmd(client_config->mhi_dev_ctxt,
MHI_COMMAND_START_CHAN,
chan);
@@ -377,16 +403,30 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
goto error_completion;
}
+spin_lock_irq(&chan_ring->ring_lock);
+chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
+spin_unlock_irq(&chan_ring->ring_lock);
client_config->chan_status = 1;
-error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mutex_unlock(&cfg->chan_lock);
+mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+"chan:%d opened successfully\n", chan);
+return 0;
+error_completion:
+read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+error_pm_state:
+free_tre_ring(mhi_dev_ctxt, chan);
+error_tre_ring:
+mutex_unlock(&cfg->chan_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
@@ -431,6 +471,7 @@ int mhi_register_channel(struct mhi_client_handle **client_handle,
struct mhi_client_config *client_config;
const char *node_name;
enum MHI_CLIENT_CHANNEL chan;
int ret;
if (!client_info || client_info->dev->of_node == NULL)
return -EINVAL;
@@ -489,6 +530,17 @@ int mhi_register_channel(struct mhi_client_handle **client_handle,
if (MHI_CLIENT_IP_HW_0_IN == chan)
client_config->intmod_t = 10;
get_chan_props(mhi_dev_ctxt, chan, &client_config->chan_info);
ret = enable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan],
client_config->chan_info.max_desc, chan,
client_config->client_info.max_payload);
if (ret) {
kfree(mhi_dev_ctxt->client_handle_list[chan]->client_config);
kfree(mhi_dev_ctxt->client_handle_list[chan]);
mhi_dev_ctxt->client_handle_list[chan] = NULL;
return -ENOMEM;
}
if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS &&
mhi_dev_ctxt->flags.mhi_initialized) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
@@ -531,6 +583,14 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
/* No more processing events for this channel */
spin_lock_irq(&chan_ring->ring_lock);
if (chan_ring->ch_state != MHI_CHAN_STATE_ENABLED) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan %d is not enabled, cur state:0x%x\n",
chan, chan_ring->ch_state);
spin_unlock_irq(&chan_ring->ring_lock);
mutex_unlock(&cfg->chan_lock);
return;
}
chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
spin_unlock_irq(&chan_ring->ring_lock);
init_completion(&cfg->cmd_complete);
@@ -565,22 +625,30 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error to receive event completion ev_cod:0x%x\n",
ev_code);
+goto error_completion;
}
-error_completion:
ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
if (ret_val)
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error resetting cmd ret:%d\n", ret_val);
+error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"resetting bb_ring for chan 0x%x\n", chan);
mhi_dev_ctxt->chan_bb_list[chan].rp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_dev_ctxt->chan_bb_list[chan].wp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_dev_ctxt->chan_bb_list[chan].ack_rp =
mhi_dev_ctxt->chan_bb_list[chan].base;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Freeing ring for chan 0x%x\n", chan);
-free_tre_ring(client_config);
+free_tre_ring(mhi_dev_ctxt, chan);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan 0x%x confirmed closed.\n", chan);
client_config->chan_status = 0;
@@ -639,6 +707,7 @@ static inline int mhi_queue_tre(struct mhi_device_ctxt
}
return 0;
}
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
enum dma_data_direction dir, struct mhi_buf_info **bb)
@@ -674,6 +743,7 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->client_buf,
bb_info->buf_len,
bb_info->dir);
bb_info->bb_active = 0;
if (!VALID_BUF(bb_info->bb_p_addr, bb_info->buf_len, mhi_dev_ctxt)) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Buffer outside DMA range 0x%lx, size 0x%zx\n",
@@ -682,30 +752,48 @@
bb_info->bb_p_addr,
bb_info->buf_len,
bb_info->dir);
-mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
-"Allocating BB, chan %d\n", chan);
-bb_info->bb_v_addr = dma_alloc_coherent(
-&mhi_dev_ctxt->plat_dev->dev,
-bb_info->buf_len,
-&bb_info->bb_p_addr,
-GFP_ATOMIC);
-if (!bb_info->bb_v_addr)
-return -ENOMEM;
-mhi_dev_ctxt->counters.bb_used[chan]++;
-if (dir == DMA_TO_DEVICE) {
-mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-"Copying client buf into BB.\n");
-memcpy(bb_info->bb_v_addr, buf, bb_info->buf_len);
-/* Flush out data to bounce buffer */
-wmb();
-}
-bb_info->bb_active = 1;
+if (likely((mhi_dev_ctxt->flags.bb_required &&
+bb_info->pre_alloc_len >= bb_info->buf_len))) {
+bb_info->bb_p_addr = bb_info->pre_alloc_p_addr;
+bb_info->bb_v_addr = bb_info->pre_alloc_v_addr;
+mhi_dev_ctxt->counters.bb_used[chan]++;
+if (dir == DMA_TO_DEVICE) {
+mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+"Copying client buf into BB.\n");
+memcpy(bb_info->bb_v_addr, buf,
+bb_info->buf_len);
+}
+bb_info->bb_active = 1;
+} else
+mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+"No BB allocated\n");
}
*bb = bb_info;
mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited chan %d\n", chan);
return 0;
}
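
VALID_BUF itself is not defined in this diff; presumably it checks
that the mapped address plus its length falls inside the device's
addressable window. A hypothetical reconstruction (the real macro and
field names may differ):

/* Hypothetical; assumes window bounds live in dev_space. */
#define VALID_BUF(addr, len, mhi)                               \
        ((addr) >= (mhi)->dev_space.start_win_addr &&           \
         (addr) + (len) <= (mhi)->dev_space.end_win_addr)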
static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt)
{
if (mhi_dev_ctxt->flags.bb_required) {
struct mhi_buf_info *bb =
(struct mhi_buf_info *)bb_ctxt->base;
int nr_el = bb_ctxt->len / bb_ctxt->el_size;
int i = 0;
for (i = 0; i < nr_el; i++, bb++)
dma_pool_free(bb_ctxt->dma_pool, bb->pre_alloc_v_addr,
bb->pre_alloc_p_addr);
dma_pool_destroy(bb_ctxt->dma_pool);
bb_ctxt->dma_pool = NULL;
}
kfree(bb_ctxt->base);
bb_ctxt->base = NULL;
}
static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb)
{
@@ -714,44 +802,11 @@ static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
/* This buffer was mapped directly to the device */
dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb->bb_p_addr, bb->buf_len, bb->dir);
-else
-/* This buffer was bounced */
-dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
-bb->buf_len,
-bb->bb_v_addr,
-bb->bb_p_addr);
bb->bb_active = 0;
mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
-void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
-struct mhi_ring *bb_ctxt)
-{
-int r = 0;
-struct mhi_buf_info *bb = NULL;
-mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
-/*
-Assumption: No events are expected during or after
-this operation is occurring for this channel.
-If a bounce buffer was allocated, the coherent memory is
-expected to be already freed.
-If the user's bounce buffer was mapped, it is expected to be
-already unmapped.
-Failure of any of the above conditions will result in
-a memory leak or subtle memory corruption.
-*/
-while (!r) {
-r = ctxt_del_element(bb_ctxt, (void **)&bb);
-if (bb)
-free_bounce_buffer(mhi_dev_ctxt, bb);
-}
-bb_ctxt->ack_rp = bb_ctxt->base;
-bb_ctxt->rp = bb_ctxt->base;
-bb_ctxt->wp = bb_ctxt->base;
-mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exited\n");
-}
static int mhi_queue_dma_xfer(
struct mhi_client_config *client_config,
dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
@@ -1342,7 +1397,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *ev_ctxt = NULL;
int pending_el = 0, i;
-struct mhi_ring *bb_ctxt;
unsigned long flags;
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
@@ -1355,8 +1409,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
return -EINVAL;
}
-bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
@@ -1426,9 +1478,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
-mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Cleaning up BB list\n");
-reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
return ret_val;
}
@@ -1672,14 +1721,17 @@ int mhi_deregister_channel(struct mhi_client_handle *client_handle)
int ret_val = 0;
int chan;
struct mhi_client_config *client_config;
struct mhi_device_ctxt *mhi_dev_ctxt;
if (!client_handle)
return -EINVAL;
client_config = client_handle->client_config;
mhi_dev_ctxt = client_config->mhi_dev_ctxt;
chan = client_config->chan_info.chan_nr;
client_config->magic = 0;
-client_config->mhi_dev_ctxt->client_handle_list[chan] = NULL;
+mhi_dev_ctxt->client_handle_list[chan] = NULL;
disable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan]);
kfree(client_config);
kfree(client_handle);
return ret_val;
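
Taken together, a client drives these entry points as register ->
open -> transfer -> close -> deregister. A rough usage sketch
(hypothetical client code; mhi_queue_xfer() and MHI_EOT are assumed
from the driver's public API, and error handling is trimmed):

static int example_client_xfer(struct mhi_client_info_t *info,
                               void *buf, size_t len)
{
        struct mhi_client_handle *handle;
        int ret;

        /* With this patch, bounce buffers are pre-allocated here. */
        ret = mhi_register_channel(&handle, info);
        if (ret)
                return ret;

        ret = mhi_open_channel(handle);
        if (ret)
                goto deregister;

        ret = mhi_queue_xfer(handle, buf, len, MHI_EOT);

        mhi_close_channel(handle);
deregister:
        mhi_deregister_channel(handle);
        return ret;
}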