mhi: core: Enable correct channel alignment

Align MHI channel rings to the ring size rounded
up to the next power of two, as required by the
device.

Change-Id: Ic77c43fdbcde2ec3b6f08e3062eaf778eb13ec5b
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
Signed-off-by: Tony Truong <truong@codeaurora.org>
commit d3d1144ee5 (parent 6ad91fea76)
Author: Tony Truong, 2015-08-20 18:00:30 -07:00
Committed by: David Keitel

3 files changed, 100 insertions(+), 81 deletions(-)
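[Reviewer note — not part of the patch] MHI requires a channel ring's base address to be aligned to the ring length rounded up to the next power of two, which is the rule the subject line refers to. A minimal sketch of that computation in kernel C, using roundup_pow_of_two() from <linux/log2.h>; the helper name is hypothetical:

#include <linux/log2.h>
#include <linux/types.h>

/* Hypothetical helper: the alignment the device demands for a ring of
 * nr_desc elements of el_size bytes each. */
static inline size_t mhi_ring_alignment(u32 nr_desc, size_t el_size)
{
	/* Round the ring length up to the next power of two; the ring
	 * base (both CPU and DMA address) must be a multiple of it. */
	return roundup_pow_of_two(nr_desc * el_size);
}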

--- changed file 1 of 3 (path not shown) ---

@@ -71,9 +71,8 @@ ev_mutex_free:
 size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
-	int i, r;
+	int i = 0;
 	size_t mhi_dev_mem = 0;
-	struct mhi_chan_info chan_info;
 	/* Calculate size needed for contexts */
 	mhi_dev_mem += (MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt)) +
@@ -90,23 +89,6 @@ size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
 		mhi_dev_mem += (sizeof(union mhi_event_pkt) *
 				mhi_dev_ctxt->ev_ring_props[i].nr_desc);
-	/* Calculate size needed for xfer TREs and bounce buffers */
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		if (VALID_CHAN_NR(i)) {
-			r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
-			if (r)
-				continue;
-			/* Add size of TREs */
-			mhi_dev_mem += (sizeof(union mhi_xfer_pkt) *
-					chan_info.max_desc);
-			/* Add bounce buffer size */
-			if (mhi_dev_ctxt->flags.bb_enabled) {
-				mhi_log(MHI_MSG_INFO,
-					"Enabling BB list, chan %d\n", i);
-				/*mhi_dev_mem += (MAX_BOUNCE_BUF_SIZE *
-						chan_info.max_desc); */
-			}
-		}
 	mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n",
 		mhi_dev_mem);
 	return mhi_dev_mem;
@@ -201,20 +183,6 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
 	return 0;
 }
-static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
-{
-	bb_ctxt->el_size = sizeof(struct mhi_buf_info);
-	bb_ctxt->len = bb_ctxt->el_size * nr_el;
-	bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
-	bb_ctxt->wp = bb_ctxt->base;
-	bb_ctxt->rp = bb_ctxt->base;
-	bb_ctxt->ack_rp = bb_ctxt->base;
-	if (!bb_ctxt->base)
-		return -ENOMEM;
-	return 0;
-}
 /*
  * The device can have severe addressing limitations, and in this case
  * the MHI driver may be restricted on where memory can be allocated.
@@ -311,7 +279,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
 		calculate_mhi_space(mhi_dev_ctxt);
 	mhi_dev_ctxt->dev_space.dev_mem_start =
-		dma_alloc_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+		dma_alloc_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
			mhi_dev_ctxt->dev_space.dev_mem_len,
			&mhi_dev_ctxt->dev_space.dma_dev_mem_start,
			GFP_KERNEL);
@@ -392,49 +360,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
 			(u64)dma_dev_mem_start + mhi_mem_index);
 		mhi_mem_index += ring_len;
 	}
-	/* Initialize both the local and device xfer contexts */
-	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
-		if (VALID_CHAN_NR(i)) {
-			struct mhi_chan_info chan_info;
-			r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
-			if (r)
-				continue;
-			mhi_log(MHI_MSG_INFO, "Initializing chan ctxt %d\n", i);
-			ring_len = (sizeof(union mhi_xfer_pkt) *
-				    chan_info.max_desc);
-			init_dev_chan_ctxt(
-				&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i],
-				dma_dev_mem_start + mhi_mem_index,
-				ring_len, chan_info.ev_ring);
-			/* TODO: May not need to do this. It would be best for
-			 * the client to set it during chan open */
-			mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i].
-				mhi_chan_type = (i % 2) + 1;
-			init_local_chan_ctxt(
-				&mhi_dev_ctxt->mhi_local_chan_ctxt[i],
-				dev_mem_start + mhi_mem_index,
-				ring_len);
-			/* TODO: May not need to do this. It would be best for
-			 * the client to set it during chan open */
-			mhi_dev_ctxt->mhi_local_chan_ctxt[i].dir = (i % 2) + 1;
-			/* Add size of TREs */
-			mhi_mem_index += ring_len;
-			if (mhi_dev_ctxt->flags.bb_enabled) {
-				r = enable_bb_ctxt(
-					&mhi_dev_ctxt->chan_bb_list[i],
-					chan_info.max_desc);
-				if (r)
-					goto error_during_bb_list;
-			}
-		}
 	return 0;
-error_during_bb_list:
-	for (; i >= 0; --i)
-		kfree(mhi_dev_ctxt->chan_bb_list[i].base);
-	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+	dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
		mhi_dev_ctxt->dev_space.dev_mem_len,
		mhi_dev_ctxt->dev_space.dev_mem_start,
		mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -629,7 +557,7 @@ error_during_thread_init:
 	kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
 	kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
 error_wq_init:
-	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+	dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
		mhi_dev_ctxt->dev_space.dev_mem_len,
		mhi_dev_ctxt->dev_space.dev_mem_start,
		mhi_dev_ctxt->dev_space.dma_dev_mem_start);
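[Reviewer note — not part of the patch] With the per-channel carve-out above removed, init_mhi_dev_mem() no longer packs transfer rings into the single coherent blob; each ring is allocated on its own when the channel opens (see the next file). The point of the split appears to be alignment: Documentation/DMA-API.txt documents that dma_alloc_coherent() returns CPU and DMA addresses aligned to the smallest PAGE_SIZE order greater than or equal to the requested size, i.e. at least the ring length rounded up to the next power of two. A sketch under that assumption; the helper name is hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical helper: one coherent allocation per ring, so every ring
 * base comes back naturally aligned to a power of two >= its length. */
static void *mhi_alloc_one_ring(struct device *dev, u32 nr_desc,
				size_t el_size, dma_addr_t *dma_addr)
{
	return dma_alloc_coherent(dev, nr_desc * el_size, dma_addr,
				  GFP_KERNEL);
}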

--- changed file 2 of 3 (path not shown) ---

@@ -29,6 +29,19 @@
 #include "mhi_macros.h"
 #include "mhi_trace.h"
+static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
+{
+	bb_ctxt->el_size = sizeof(struct mhi_buf_info);
+	bb_ctxt->len = bb_ctxt->el_size * nr_el;
+	bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
+	bb_ctxt->wp = bb_ctxt->base;
+	bb_ctxt->rp = bb_ctxt->base;
+	bb_ctxt->ack_rp = bb_ctxt->base;
+	if (!bb_ctxt->base)
+		return -ENOMEM;
+	return 0;
+}
 static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
			void __iomem *io_addr_lower,
			uintptr_t chan, u64 val)
@@ -188,6 +201,69 @@ int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
 	return r;
 }
+int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+			  struct mhi_chan_ctxt *cc_list,
+			  struct mhi_ring *ring)
+{
+	if (cc_list == NULL || ring == NULL)
+		return -EINVAL;
+	dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+			  ring->len, ring->base,
+			  cc_list->mhi_trb_ring_base_addr);
+	mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
+			   MHI_CHAN_STATE_DISABLED);
+	return 0;
+}
+void free_tre_ring(struct mhi_client_handle *client_handle)
+{
+	struct mhi_chan_ctxt *chan_ctxt;
+	struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+	int chan = client_handle->chan_info.chan_nr;
+	int r;
+	chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+	r = mhi_release_chan_ctxt(mhi_dev_ctxt, chan_ctxt,
+				  &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]);
+	if (r)
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to release chan %d ret %d\n", chan, r);
+}
+static int populate_tre_ring(struct mhi_client_handle *client_handle)
+{
+	dma_addr_t ring_dma_addr;
+	void *ring_local_addr;
+	struct mhi_chan_ctxt *chan_ctxt;
+	struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+	u32 chan = client_handle->chan_info.chan_nr;
+	u32 nr_desc = client_handle->chan_info.max_desc;
+	mhi_log(MHI_MSG_INFO,
+		"Entered chan %d requested desc %d\n", chan, nr_desc);
+	chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+	ring_local_addr = dma_alloc_coherent(
+			&mhi_dev_ctxt->dev_info->pcie_device->dev,
+			nr_desc * sizeof(union mhi_xfer_pkt),
+			&ring_dma_addr, GFP_KERNEL);
+	if (ring_local_addr == NULL)
+		return -ENOMEM;
+	mhi_init_chan_ctxt(chan_ctxt, ring_dma_addr,
+			   (uintptr_t)ring_local_addr,
+			   nr_desc,
+			   GET_CHAN_PROPS(CHAN_DIR,
+					  client_handle->chan_info.flags),
+			   client_handle->chan_info.ev_ring,
+			   &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+			   MHI_CHAN_STATE_ENABLED);
+	mhi_log(MHI_MSG_INFO, "Exited\n");
+	return 0;
+}
 enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle)
 {
 	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
@@ -217,10 +293,24 @@ enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle)
 			chan, mhi_dev_ctxt->dev_exec_env);
 		return MHI_STATUS_DEVICE_NOT_READY;
 	}
 	client_handle->event_ring_index =
 		mhi_dev_ctxt->dev_space.ring_ctxt.
				cc_list[chan].mhi_event_ring_index;
+	r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+			   client_handle->chan_info.max_desc);
+	if (r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to initialize bb ctxt chan %d ret %d\n",
+			chan, r);
+		return r;
+	}
+	r = populate_tre_ring(client_handle);
+	if (r) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to initialize tre ring chan %d ret %d\n",
+			chan, r);
+		return r;
+	}
 	client_handle->msi_vec =
 		mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
@@ -480,7 +570,7 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
			bb_info->dir);
 	mhi_log(MHI_MSG_RAW, "Allocating BB, chan %d\n", chan);
 	bb_info->bb_v_addr = dma_alloc_coherent(
-			&mhi_dev_ctxt->dev_info->plat_dev->dev,
+			&mhi_dev_ctxt->dev_info->pcie_device->dev,
			bb_info->buf_len,
			&bb_info->bb_p_addr,
			GFP_ATOMIC);
@@ -510,7 +600,7 @@ static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
			bb->bb_p_addr, bb->buf_len, bb->dir);
 	else
 		/* This buffer was bounced */
-		dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+		dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
				  bb->buf_len,
				  bb->bb_v_addr,
				  bb->bb_p_addr);
@@ -1013,7 +1103,7 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
 	/* Get the TRB this event points to */
 	local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt,
-					MHI_RING_TYPE_EVENT_RING, event_id,
+					MHI_RING_TYPE_XFER_RING, chan,
					phy_ev_trb_loc);
 	local_trb_loc = (union mhi_xfer_pkt *)local_chan_ctxt->rp;
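[Reviewer note — not part of the patch] After this file's changes, a transfer ring and its bounce-buffer list live exactly as long as the channel is open: mhi_open_channel() sets up the bb ctxt and then the TRE ring, and free_tre_ring() tears the ring down again. A sketch of the resulting lifecycle from a client's point of view; the function below and the close path that would call free_tre_ring() are assumptions, not part of this diff:

/* Hypothetical client flow (callees are from this diff; error
 * handling trimmed). */
static int example_use_channel(struct mhi_client_handle *client_handle)
{
	enum MHI_STATUS ret_val;

	/* Allocates the bounce-buffer ctxt, then the TRE ring, and
	 * enables the channel context. */
	ret_val = mhi_open_channel(client_handle);
	if (ret_val != MHI_STATUS_SUCCESS)
		return -EIO;

	/* ... queue transfers, service events ... */

	/* Frees the ring via mhi_release_chan_ctxt() and leaves the
	 * channel context MHI_CHAN_STATE_DISABLED. */
	free_tre_ring(client_handle);
	return 0;
}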

--- changed file 3 of 3 (path not shown) ---

@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -619,6 +619,7 @@ static enum MHI_STATUS process_amss_transition(
 		}
 		ring_all_chan_dbs(mhi_dev_ctxt);
 	}
+	ring_all_ev_dbs(mhi_dev_ctxt);
 	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
 	if (!mhi_dev_ctxt->flags.pending_M3 &&
			mhi_dev_ctxt->flags.link_up)
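[Reviewer note — not part of the patch] On the AMSS transition the host now rings the event ring doorbells in addition to the channel doorbells, so the device learns the write pointers of the newly re-sized, re-aligned event rings. The body of ring_all_ev_dbs() is not shown in this diff; a plausible shape by analogy with ring_all_chan_dbs(), where every identifier not visible in the diff (NR_OF_EVENT_RINGS, mhi_local_event_ctxt, mmio_info.event_db_addr, mhi_v2p_addr) is a placeholder:

/* Assumed sketch of ring_all_ev_dbs(); not the driver's actual code. */
static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i;

	for (i = 0; i < NR_OF_EVENT_RINGS; ++i) {
		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
		u64 db_val = mhi_v2p_addr(mhi_dev_ctxt,
					  MHI_RING_TYPE_EVENT_RING, i,
					  (uintptr_t)ring->wp);

		/* Publish the host write pointer for event ring i */
		mhi_write_db(mhi_dev_ctxt,
			     mhi_dev_ctxt->mmio_info.event_db_addr,
			     i, db_val);
	}
}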