msm: ipa: Fix to use GFP_DMA flag

IPAv2 hardware works with 32-bit addressing, so allocate
kernel memory using the GFP_DMA flag for buffers that are
processed by the IPA hardware. Also added code changes to
free the correct tx_pkt pointer in the fail_dma_map condition.

Change-Id: I5f7005c3bf89275fd56af648ee5bf1b3d06afc38
Acked-by: Ashok Vuyyuru <avuyyuru@qti.qualcomm.com>
Acked-by: Mohammed Javid <mjavid@qti.qualcomm.com>
Signed-off-by: Utkarsh Saxena <usaxena@codeaurora.org>
This commit is contained in:
Utkarsh Saxena 2017-05-16 22:41:50 +05:30
parent 5404e35069
commit fd92093b0a
7 changed files with 49 additions and 44 deletions

View file

@@ -1836,6 +1836,7 @@ static int ipa_q6_clean_q6_tables(void)
struct ipa_mem_buffer mem = { 0 };
u32 *entry;
u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
GFP_ATOMIC);
@@ -1856,7 +1857,7 @@ static int ipa_q6_clean_q6_tables(void)
}
cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
GFP_KERNEL);
flag);
if (!cmd) {
IPAERR("failed to allocate memory\n");
retval = -ENOMEM;

View file

@@ -420,15 +420,17 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
int i = 0;
int j;
int result;
int fail_dma_wrap = 0;
uint size = num_desc * sizeof(struct sps_iovec);
u32 mem_flag = GFP_ATOMIC;
gfp_t mem_flag = GFP_ATOMIC;
struct sps_iovec iov;
int ret;
gfp_t flag;
if (unlikely(!in_atomic))
mem_flag = GFP_KERNEL;
flag = mem_flag | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
&dma_addr);
@@ -437,7 +439,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
return -EFAULT;
}
} else {
transfer.iovec = kmalloc(size, mem_flag);
transfer.iovec = kmalloc(size, flag);
if (!transfer.iovec) {
IPAERR("fail to alloc mem for sps xfr buff ");
IPAERR("num_desc = %d size = %d\n", num_desc, size);
@@ -457,7 +459,6 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
spin_lock_bh(&sys->spinlock);
for (i = 0; i < num_desc; i++) {
fail_dma_wrap = 0;
tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
mem_flag);
if (!tx_pkt) {
@@ -493,15 +494,6 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
if (dma_mapping_error(ipa_ctx->pdev,
tx_pkt->mem.phys_base)) {
IPAERR("dma_map_single ");
IPAERR("failed\n");
fail_dma_wrap = 1;
goto failure;
}
} else {
tx_pkt->mem.phys_base = desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
@@ -522,10 +514,9 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
}
}
if (!tx_pkt->mem.phys_base) {
IPAERR("failed to alloc tx wrapper\n");
fail_dma_wrap = 1;
goto failure;
if (dma_mapping_error(ipa_ctx->pdev, tx_pkt->mem.phys_base)) {
IPAERR("dma_map_single failed\n");
goto failure_dma_map;
}
tx_pkt->sys = sys;
@@ -580,27 +571,30 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
spin_unlock_bh(&sys->spinlock);
return 0;
failure_dma_map:
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
failure:
tx_pkt = transfer.user;
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
} else {
dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
if (!tx_pkt->no_unmap_dma) {
if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
dma_unmap_single(ipa_ctx->pdev,
tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
} else {
dma_unmap_page(ipa_ctx->pdev,
tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
}
}
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
if (j < num_desc)
/* last desc failed */
if (fail_dma_wrap)
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
if (transfer.iovec_phys) {
if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
@@ -1659,6 +1653,7 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa_sys_context *sys;
int src_ep_idx;
int num_frags, f;
gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1724,7 +1719,7 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (dst_ep_idx != -1) {
/* SW data path */
cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
cmd = kzalloc(sizeof(struct ipa_ip_packet_init), flag);
if (!cmd) {
IPAERR("failed to alloc immediate command object\n");
goto fail_gen;

View file

@@ -653,6 +653,7 @@ int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
struct ipa_ip_v6_filter_init *v6;
u16 avail;
u16 size;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
if (!mem) {
@@ -669,7 +670,7 @@ int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
IPA_MEM_PART(v6_flt_size_ddr);
size = sizeof(struct ipa_ip_v6_filter_init);
}
cmd = kmalloc(size, GFP_KERNEL);
cmd = kmalloc(size, flag);
if (!cmd) {
IPAERR("failed to alloc immediate command object\n");
goto fail_alloc_cmd;
@@ -842,6 +843,7 @@ int __ipa_commit_flt_v2(enum ipa_ip_type ip)
int num_desc = 0;
int i;
u16 avail;
gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
if (desc == NULL) {
@@ -850,7 +852,7 @@ int __ipa_commit_flt_v2(enum ipa_ip_type ip)
goto fail_desc;
}
cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC);
cmd = kzalloc(16 * sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("fail to alloc cmd blob ip %d\n", ip);
rc = -ENOMEM;

View file

@@ -176,6 +176,7 @@ int __ipa_commit_hdr_v1_1(void)
struct ipa_mem_buffer *mem;
struct ipa_hdr_init_local *cmd;
u16 len;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
if (!mem) {
@@ -190,7 +191,7 @@ int __ipa_commit_hdr_v1_1(void)
* we can use init_local ptr for init_system due to layout of the
* struct
*/
cmd = kmalloc(len, GFP_KERNEL);
cmd = kmalloc(len, flag);
if (!cmd) {
IPAERR("failed to alloc immediate command object\n");
goto fail_alloc_cmd;
@@ -663,6 +664,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
int id;
int mem_size;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
IPAERR("bad parm\n");
@@ -674,7 +676,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
goto error;
}
entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, flag);
if (!entry) {
IPAERR("failed to alloc hdr object\n");
goto error;

View file

@@ -1,4 +1,4 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -325,6 +325,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
int result;
u32 offset = 0;
size_t tmp;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
IPADBG("\n");
if (init->table_entries == 0) {
@@ -406,7 +407,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
if (!reg_write_nop) {
IPAERR("no mem\n");
result = -ENOMEM;
@@ -424,7 +425,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
desc[0].pyld = (void *)reg_write_nop;
desc[0].len = sizeof(*reg_write_nop);
cmd = kmalloc(size, GFP_KERNEL);
cmd = kmalloc(size, flag);
if (!cmd) {
IPAERR("Failed to alloc immediate command object\n");
result = -ENOMEM;
@@ -569,6 +570,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
struct ipa_desc *desc = NULL;
u16 size = 0, cnt = 0;
int ret = 0;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
IPADBG("\n");
if (dma->entries <= 0) {
@@ -652,7 +654,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
}
size = sizeof(struct ipa_nat_dma);
cmd = kzalloc(size, GFP_KERNEL);
cmd = kzalloc(size, flag);
if (cmd == NULL) {
IPAERR("Failed to alloc memory\n");
ret = -ENOMEM;
@@ -660,7 +662,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
}
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
if (!reg_write_nop) {
IPAERR("Failed to alloc memory\n");
ret = -ENOMEM;
@@ -754,6 +756,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
u8 mem_type = IPA_NAT_SHARED_MEMORY;
u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
int result;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
IPADBG("\n");
if (ipa_ctx->nat_mem.is_tmp_mem) {
@@ -770,7 +773,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
if (!reg_write_nop) {
IPAERR("no mem\n");
result = -ENOMEM;
@@ -788,7 +791,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
desc[0].pyld = (void *)reg_write_nop;
desc[0].len = sizeof(*reg_write_nop);
cmd = kmalloc(size, GFP_KERNEL);
cmd = kmalloc(size, flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
result = -ENOMEM;

View file

@@ -525,6 +525,7 @@ int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
struct ipa_ip_v6_routing_init *v6;
u16 avail;
u16 size;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
if (!mem) {
@@ -541,7 +542,7 @@ int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
IPA_MEM_PART(v6_rt_size_ddr);
size = sizeof(struct ipa_ip_v6_routing_init);
}
cmd = kmalloc(size, GFP_KERNEL);
cmd = kmalloc(size, flag);
if (!cmd) {
IPAERR("failed to alloc immediate command object\n");
goto fail_alloc_cmd;

View file

@@ -4507,6 +4507,7 @@ int ipa_tag_process(struct ipa_desc desc[],
int res;
struct ipa_tag_completion *comp;
int ep_idx;
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
/* Not enough room for the required descriptors for the tag process */
if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
@@ -4524,7 +4525,7 @@ int ipa_tag_process(struct ipa_desc desc[],
}
sys = ipa_ctx->ep[ep_idx].sys;
tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
if (!tag_desc) {
IPAERR("failed to allocate memory\n");
res = -ENOMEM;