msm: ipa3: add support for TX of sk_buff's with paged data

This support is needed to enable GSO (Generic Segmentation
Offload) on msmcobalt.

Change-Id: Id9949bef91835318a7b344753983eea0aeab7bdc
Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>

Author:    Skylar Chang <chiaweic@codeaurora.org>
Date:      2016-04-14 16:49:37 -07:00
Committer: Kyle Yan
Commit:    52d3e4b266 (parent c2698360a7)

3 changed files with 136 additions and 32 deletions
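
For context on what "paged data" means here: a GSO sk_buff keeps skb_headlen(skb) bytes in its linear buffer and the remaining skb->data_len bytes in the page fragments held in skb_shinfo(skb)->frags[], so transmitting it takes one descriptor for the linear part plus one per fragment. The sketch below only illustrates that walk; walk_skb_for_tx() and struct tx_desc are invented names for this example, not part of the IPA driver.

#include <linux/skbuff.h>

/* Invented descriptor type, used only for this illustration. */
struct tx_desc {
        void *vaddr;            /* linear data, if any */
        skb_frag_t *frag;       /* paged data, if any */
        unsigned int len;
};

/*
 * Fill one descriptor for the linear part of the skb and one per page
 * fragment. Returns the number of descriptors written; the caller must
 * provide room for 1 + skb_shinfo(skb)->nr_frags entries.
 */
static int walk_skb_for_tx(struct sk_buff *skb, struct tx_desc *d)
{
        int i, n = 0;

        d[n].vaddr = skb->data;
        d[n].frag = NULL;
        d[n].len = skb_headlen(skb);    /* linear bytes only, not skb->len */
        n++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                d[n].vaddr = NULL;
                d[n].frag = &skb_shinfo(skb)->frags[i];
                d[n].len = skb_frag_size(d[n].frag);
                n++;
        }

        return n;
}
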


@@ -921,6 +921,7 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
"hw_tx=%u\n"
"tx_non_linear=%u\n"
"tx_compl=%u\n"
"wan_rx=%u\n"
"stat_compl=%u\n"
@@ -936,6 +937,7 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
"flow_disable=%u\n",
ipa3_ctx->stats.tx_sw_pkts,
ipa3_ctx->stats.tx_hw_pkts,
ipa3_ctx->stats.tx_non_linear,
ipa3_ctx->stats.tx_pkts_compl,
ipa3_ctx->stats.rx_pkts,
ipa3_ctx->stats.stat_compl,
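
The only functional change in the two hunks above is the new tx_non_linear counter in the stats dump. For readers unfamiliar with the pattern, a debugfs read handler of this kind typically formats the counters into a kernel buffer with scnprintf() and hands them to user space with simple_read_from_buffer(). A minimal, self-contained sketch follows; the demo_* names and buffer size are made up for the example.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Invented counters mirroring the sw_tx/hw_tx/tx_non_linear idea. */
static struct {
        u32 tx_sw_pkts;
        u32 tx_hw_pkts;
        u32 tx_non_linear;
} demo_stats;

static char demo_buf[256];

static ssize_t demo_read_stats(struct file *file, char __user *ubuf,
                               size_t count, loff_t *ppos)
{
        int nbytes;

        nbytes = scnprintf(demo_buf, sizeof(demo_buf),
                           "sw_tx=%u\n"
                           "hw_tx=%u\n"
                           "tx_non_linear=%u\n",
                           demo_stats.tx_sw_pkts,
                           demo_stats.tx_hw_pkts,
                           demo_stats.tx_non_linear);

        return simple_read_from_buffer(ubuf, count, ppos, demo_buf, nbytes);
}
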


@@ -113,11 +113,19 @@ static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
list_del(&tx_pkt->link);
sys->len--;
spin_unlock_bh(&sys->spinlock);
if (!tx_pkt->no_unmap_dma)
if (!tx_pkt->no_unmap_dma) {
if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
dma_unmap_single(ipa3_ctx->pdev,
tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
} else {
dma_unmap_page(ipa3_ctx->pdev,
tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
}
}
if (tx_pkt->callback)
tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
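
One detail worth calling out in the completion path above: a mapping must be released with the DMA API call that matches how it was created. The linear payload is mapped with dma_map_single() and therefore released with dma_unmap_single(), while a page fragment is mapped with skb_frag_dma_map() (a page mapping) and released with dma_unmap_page(). A minimal sketch of that pairing, using an invented wrapper struct rather than the driver's tx packet wrapper:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Invented completion record for this illustration. */
struct demo_tx_pkt {
        bool is_paged;          /* mapped via skb_frag_dma_map()? */
        bool no_unmap_dma;      /* caller owns the mapping */
        dma_addr_t phys_base;
        size_t size;
};

static void demo_tx_complete(struct device *dev, struct demo_tx_pkt *pkt)
{
        if (pkt->no_unmap_dma)
                return;         /* mapping is owned by the caller */

        if (!pkt->is_paged)
                dma_unmap_single(dev, pkt->phys_base, pkt->size,
                                 DMA_TO_DEVICE);
        else
                dma_unmap_page(dev, pkt->phys_base, pkt->size,
                               DMA_TO_DEVICE);
}
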
@@ -547,6 +555,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
tx_pkt->type = desc[i].type;
if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
tx_pkt->mem.base = desc[i].pyld;
tx_pkt->mem.size = desc[i].len;
@@ -562,10 +572,31 @@ int ipa3_send(struct ipa3_sys_context *sys,
goto failure;
}
} else {
tx_pkt->mem.phys_base = desc[i].dma_address;
tx_pkt->mem.phys_base =
desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
}
} else {
tx_pkt->mem.base = desc[i].frag;
tx_pkt->mem.size = desc[i].len;
if (!desc[i].dma_address_valid) {
tx_pkt->mem.phys_base =
skb_frag_dma_map(ipa3_ctx->pdev,
desc[i].frag,
0, tx_pkt->mem.size,
DMA_TO_DEVICE);
if (!tx_pkt->mem.phys_base) {
IPAERR("dma map failed\n");
fail_dma_wrap = 1;
goto failure;
}
} else {
tx_pkt->mem.phys_base =
desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
}
}
tx_pkt->sys = sys;
tx_pkt->callback = desc[i].callback;
tx_pkt->user1 = desc[i].user1;
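
On the mapping side, the two descriptor kinds use different helpers: the linear payload goes through dma_map_single(), while a page fragment is mapped with skb_frag_dma_map(), which takes the fragment, an offset, and a length and returns a dma_addr_t. The sketch below shows the pairing; note that it checks the result with dma_mapping_error(), the usual DMA-API idiom, whereas the code above tests for a zero address.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/*
 * Map either a linear payload or one page fragment for TX.
 * Illustrative only; not the driver's descriptor layout.
 * Returns 0 on success, -ENOMEM if the mapping failed.
 */
static int demo_map_for_tx(struct device *dev, void *pyld, skb_frag_t *frag,
                           unsigned int len, dma_addr_t *addr)
{
        if (frag)
                *addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
        else
                *addr = dma_map_single(dev, pyld, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, *addr))
                return -ENOMEM;

        return 0;
}
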
@@ -664,9 +695,15 @@ failure:
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
} else {
dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
}
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
@@ -675,9 +712,9 @@ failure:
if (fail_dma_wrap)
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
kfree(gsi_xfer_elem_array);
else {
} else {
if (transfer.iovec_phys) {
if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
dma_pool_free(ipa3_ctx->dma_pool,
@@ -1534,20 +1571,42 @@ static void ipa3_tx_cmd_comp(void *user1, int user2)
int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa_tx_meta *meta)
{
struct ipa3_desc desc[3];
struct ipa3_desc *desc;
struct ipa3_desc _desc[3];
int dst_ep_idx;
struct ipahal_imm_cmd_ip_packet_init cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_sys_context *sys;
int src_ep_idx;
int num_frags, f;
memset(desc, 0, 3 * sizeof(struct ipa3_desc));
if (unlikely(!ipa3_ctx)) {
IPAERR("IPA3 driver was not initialized\n");
return -EINVAL;
}
if (skb->len == 0) {
IPAERR("packet size is 0\n");
return -EINVAL;
}
num_frags = skb_shinfo(skb)->nr_frags;
if (num_frags) {
/* 1 desc for tag to resolve status out-of-order issue;
* 1 desc is needed for the linear portion of skb;
* 1 desc may be needed for the PACKET_INIT;
* 1 desc for each frag
*/
desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
if (!desc) {
IPAERR("failed to alloc desc array\n");
goto fail_mem;
}
} else {
memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
desc = &_desc[0];
}
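
The allocation above follows a common pattern: the non-paged case keeps using a small fixed array on the stack, while a fragmented skb needs num_frags extra descriptors and therefore a dynamic allocation, done with GFP_ATOMIC since the transmit path may not sleep. A condensed sketch of that sizing decision, with invented names:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DEMO_FIXED_DESCS 3      /* tag + PACKET_INIT + linear part */

struct demo_desc { void *pyld; skb_frag_t *frag; unsigned int len; };

static struct demo_desc *demo_alloc_descs(struct sk_buff *skb,
                                          struct demo_desc *stack_descs)
{
        int num_frags = skb_shinfo(skb)->nr_frags;

        if (!num_frags) {
                /* common case: caller-provided fixed array, no allocation */
                memset(stack_descs, 0,
                       DEMO_FIXED_DESCS * sizeof(*stack_descs));
                return stack_descs;
        }

        /* one extra descriptor per page fragment; atomic context */
        return kcalloc(num_frags + DEMO_FIXED_DESCS,
                       sizeof(struct demo_desc), GFP_ATOMIC);
}

As in the function above, the caller is then responsible for kfree()ing the array in the fragmented case once the send has completed or failed.
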
/*
* USB_CONS: PKT_INIT ep_idx = dst pipe
* Q6_CONS: PKT_INIT ep_idx = sender pipe
@@ -1562,14 +1621,14 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (-1 == src_ep_idx) {
IPAERR("Client %u is not mapped\n",
IPA_CLIENT_APPS_LAN_WAN_PROD);
return -EFAULT;
goto fail_gen;
}
dst_ep_idx = ipa3_get_ep_mapping(dst);
} else {
src_ep_idx = ipa3_get_ep_mapping(dst);
if (-1 == src_ep_idx) {
IPAERR("Client %u is not mapped\n", dst);
return -EFAULT;
goto fail_gen;
}
if (meta && meta->pkt_init_dst_ep_valid)
dst_ep_idx = meta->pkt_init_dst_ep;
@@ -1607,7 +1666,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[1].callback = ipa3_tx_cmd_comp;
desc[1].user1 = cmd_pyld;
desc[2].pyld = skb->data;
desc[2].len = skb->len;
desc[2].len = skb_headlen(skb);
desc[2].type = IPA_DATA_DESC_SKB;
desc[2].callback = ipa3_tx_comp_usr_notify_release;
desc[2].user1 = skb;
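
Switching the data descriptor from skb->len to skb_headlen(skb) matters precisely for nonlinear skbs: skb->len counts linear plus paged bytes, while skb_headlen() is the linear part only; the paged bytes are carried by the per-fragment descriptors added below. A small sanity-check sketch of that accounting (assuming the skb has no frag_list, so all non-linear data sits in frags[]):

#include <linux/bug.h>
#include <linux/skbuff.h>

/* For an skb without a frag_list:
 * skb->len == skb_headlen(skb) + sum of skb_frag_size() over all frags.
 */
static void demo_check_skb_len(struct sk_buff *skb)
{
        unsigned int i, paged = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                paged += skb_frag_size(&skb_shinfo(skb)->frags[i]);

        WARN_ON(skb->len != skb_headlen(skb) + paged);
}
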
@@ -1620,8 +1679,22 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[2].dma_address = meta->dma_address;
}
if (ipa3_send(sys, 3, desc, true)) {
IPAERR("fail to send immediate command\n");
for (f = 0; f < num_frags; f++) {
desc[3+f].frag = &skb_shinfo(skb)->frags[f];
desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
desc[3+f].len = skb_frag_size(desc[3+f].frag);
}
/* don't free skb till frag mappings are released */
if (num_frags) {
desc[3+f-1].callback = desc[2].callback;
desc[3+f-1].user1 = desc[2].user1;
desc[3+f-1].user2 = desc[2].user2;
desc[2].callback = NULL;
}
if (ipa3_send(sys, num_frags + 3, desc, true)) {
IPAERR("fail to send skb %p num_frags %u SWP\n",
skb, num_frags);
goto fail_send;
}
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
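
Both send paths apply the trick noted in the "don't free skb till frag mappings are released" comments: the completion callback that ultimately frees the skb is moved from the linear-data descriptor to the last fragment descriptor, so the skb (and the pages its fragments reference) stays alive until every fragment has completed and been unmapped. This relies on completions being handled in descriptor order, which is what the common write-done path earlier in this file does. A stripped-down sketch of the hand-off, with invented field names:

/* Invented descriptor type carrying a completion callback. */
struct demo_cb_desc {
        void (*callback)(void *user1, int user2);
        void *user1;
        int user2;
};

/*
 * Move the skb-release callback from the linear descriptor to the last
 * fragment descriptor; with in-order completions the skb is then released
 * only after all fragment DMA mappings have been torn down.
 */
static void demo_defer_release(struct demo_cb_desc *linear,
                               struct demo_cb_desc *last_frag)
{
        last_frag->callback = linear->callback;
        last_frag->user1 = linear->user1;
        last_frag->user2 = linear->user2;
        linear->callback = NULL;
}
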
@@ -1633,7 +1706,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = ipa3_tag_destroy_imm;
desc[1].pyld = skb->data;
desc[1].len = skb->len;
desc[1].len = skb_headlen(skb);
desc[1].type = IPA_DATA_DESC_SKB;
desc[1].callback = ipa3_tx_comp_usr_notify_release;
desc[1].user1 = skb;
@@ -1643,19 +1716,44 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[1].dma_address_valid = true;
desc[1].dma_address = meta->dma_address;
}
if (num_frags == 0) {
if (ipa3_send(sys, 2, desc, true)) {
IPAERR("fail to send skb\n");
IPAERR("fail to send skb %p HWP\n", skb);
goto fail_gen;
}
} else {
for (f = 0; f < num_frags; f++) {
desc[2+f].frag = &skb_shinfo(skb)->frags[f];
desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
desc[2+f].len = skb_frag_size(desc[2+f].frag);
}
/* don't free skb till frag mappings are released */
desc[2+f-1].callback = desc[1].callback;
desc[2+f-1].user1 = desc[1].user1;
desc[2+f-1].user2 = desc[1].user2;
desc[1].callback = NULL;
if (ipa3_send(sys, num_frags + 2, desc, true)) {
IPAERR("fail to send skb %p num_frags %u HWP\n",
skb, num_frags);
goto fail_gen;
}
}
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
}
if (num_frags) {
kfree(desc);
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
}
return 0;
fail_send:
ipahal_destroy_imm_cmd(cmd_pyld);
fail_gen:
if (num_frags)
kfree(desc);
fail_mem:
return -EFAULT;
}
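
The fail_send/fail_gen/fail_mem labels above follow the usual kernel goto-unwind convention: each label undoes only what had been set up before the failing step and then falls through to the labels below it, so cleanup runs in reverse order of construction. A generic, self-contained sketch of the pattern (all names invented):

#include <linux/slab.h>

static int demo_send_step(void *descs, void *imm_cmd)
{
        return 0;       /* stand-in for the actual send */
}

static int demo_tx_path(void)
{
        void *descs, *imm_cmd;

        descs = kzalloc(64, GFP_ATOMIC);
        if (!descs)
                goto fail_mem;

        imm_cmd = kzalloc(32, GFP_ATOMIC);
        if (!imm_cmd)
                goto fail_gen;

        if (demo_send_step(descs, imm_cmd))
                goto fail_send;

        kfree(imm_cmd);
        kfree(descs);
        return 0;

fail_send:
        kfree(imm_cmd);         /* undo the most recent step first... */
fail_gen:
        kfree(descs);           /* ...then fall through to earlier ones */
fail_mem:
        return -EFAULT;
}
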


@@ -703,7 +703,8 @@ struct ipa3_sys_context {
enum ipa3_desc_type {
IPA_DATA_DESC,
IPA_DATA_DESC_SKB,
IPA_IMM_CMD_DESC
IPA_DATA_DESC_SKB_PAGED,
IPA_IMM_CMD_DESC,
};
/**
@@ -767,6 +768,7 @@ struct ipa3_dma_xfer_wrapper {
* struct ipa3_desc - IPA descriptor
* @type: skb or immediate command or plain old data
* @pyld: points to skb
* or kmalloc'ed immediate command parameters/plain old data
* @frag: points to paged fragment
* @dma_address: dma mapped address of pyld
* @dma_address_valid: valid field for dma_address
@@ -780,6 +782,7 @@ struct ipa3_dma_xfer_wrapper {
struct ipa3_desc {
enum ipa3_desc_type type;
void *pyld;
skb_frag_t *frag;
dma_addr_t dma_address;
bool dma_address_valid;
u16 len;
@@ -889,6 +892,7 @@ struct ipa3_stats {
u32 lan_repl_rx_empty;
u32 flow_enable;
u32 flow_disable;
u32 tx_non_linear;
};
struct ipa3_active_clients {