msm: ipa: Fix to use GFP_DMA and add check for dma_map_single

IPAv2 hardware supports only 32-bit addressing, so allocate kernel
memory that will be processed by the IPA hardware using the GFP_DMA
flag.
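
For example, the pattern used throughout this patch computes the gfp
mask once per function and passes it to kzalloc(). A minimal sketch of
that pattern as it appears in the diff (functions running in atomic
context use GFP_ATOMIC in place of GFP_KERNEL):

    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

    cmd = kzalloc(sizeof(*cmd), flag);
    if (cmd == NULL) {
        IPAERR("Failed to alloc immediate command object\n");
        return -ENOMEM;
    }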

Add an error check on the return value of dma_map_single() using the
dma_mapping_error() API.
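
A dma_addr_t returned by dma_map_single() should not be compared
against 0 or ~0 by hand, because the error cookie is platform
specific; dma_mapping_error() is the portable check. A minimal sketch
of the pattern applied below, where buf and size stand in for the
driver's actual buffer and length:

    dma_addr_t dma_addr;

    dma_addr = dma_map_single(ipa_ctx->pdev, buf, size, DMA_TO_DEVICE);
    if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
        IPAERR("dma_map_single failed\n");
        return -EFAULT;
    }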

Change-Id: I08e3eec8e9b2d81f945283a72338c0a8f23e2ac9
Acked-by: Mohammed Javid <mjavid@qti.qualcomm.com>
Signed-off-by: Utkarsh Saxena <usaxena@codeaurora.org>
Author: Utkarsh Saxena
Date:   2017-04-14 19:31:07 +05:30
parent 406ab7d2d4
commit 314c7a8de6
4 changed files with 57 additions and 29 deletions

File 1 of 4:

@@ -1584,6 +1584,7 @@ static int ipa_init_smem_region(int memory_region_size,
     struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
     struct ipa_desc desc;
     struct ipa_mem_buffer mem;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     int rc;

     if (memory_region_size == 0)
@@ -1603,7 +1604,7 @@ static int ipa_init_smem_region(int memory_region_size,
     memset(mem.base, 0, mem.size);

     cmd = kzalloc(sizeof(*cmd),
-        GFP_KERNEL);
+        flag);
     if (cmd == NULL) {
         IPAERR("Failed to alloc immediate command object\n");
         rc = -ENOMEM;
@@ -2166,6 +2167,7 @@ int _ipa_init_sram_v2(void)
     struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
     struct ipa_desc desc = {0};
     struct ipa_mem_buffer mem;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     int rc = 0;

     phys_addr = ipa_ctx->ipa_wrapper_base +
@@ -2203,7 +2205,7 @@ int _ipa_init_sram_v2(void)
     }

     memset(mem.base, 0, mem.size);
-    cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+    cmd = kzalloc(sizeof(*cmd), flag);
     if (cmd == NULL) {
         IPAERR("Failed to alloc immediate command object\n");
         rc = -ENOMEM;
@@ -2314,6 +2316,7 @@ int _ipa_init_hdr_v2(void)
     struct ipa_desc desc = { 0 };
     struct ipa_mem_buffer mem;
     struct ipa_hdr_init_local *cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     int rc = 0;

     mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@@ -2325,7 +2328,7 @@ int _ipa_init_hdr_v2(void)
     }

     memset(mem.base, 0, mem.size);
-    cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+    cmd = kzalloc(sizeof(*cmd), flag);
     if (cmd == NULL) {
         IPAERR("Failed to alloc header init command object\n");
         rc = -ENOMEM;
@@ -2360,6 +2363,7 @@ int _ipa_init_hdr_v2_5(void)
     struct ipa_mem_buffer mem;
     struct ipa_hdr_init_local *cmd = NULL;
     struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);

     mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
     mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@@ -2370,7 +2374,7 @@ int _ipa_init_hdr_v2_5(void)
     }

     memset(mem.base, 0, mem.size);
-    cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+    cmd = kzalloc(sizeof(*cmd), flag);
     if (cmd == NULL) {
         IPAERR("Failed to alloc header init command object\n");
         dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
@@ -2411,7 +2415,7 @@ int _ipa_init_hdr_v2_5(void)
     memset(mem.base, 0, mem.size);
     memset(&desc, 0, sizeof(desc));

-    dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
+    dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
     if (dma_cmd == NULL) {
         IPAERR("Failed to alloc immediate command object\n");
         dma_free_coherent(ipa_ctx->pdev,
@@ -2462,6 +2466,7 @@ int _ipa_init_rt4_v2(void)
     struct ipa_desc desc = { 0 };
     struct ipa_mem_buffer mem;
     struct ipa_ip_v4_routing_init *v4_cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     u32 *entry;
     int i;
     int rc = 0;
@@ -2486,7 +2491,7 @@ int _ipa_init_rt4_v2(void)
         entry++;
     }

-    v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+    v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
     if (v4_cmd == NULL) {
         IPAERR("Failed to alloc v4 routing init command object\n");
         rc = -ENOMEM;
@@ -2522,6 +2527,7 @@ int _ipa_init_rt6_v2(void)
     struct ipa_desc desc = { 0 };
     struct ipa_mem_buffer mem;
     struct ipa_ip_v6_routing_init *v6_cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     u32 *entry;
     int i;
     int rc = 0;
@@ -2546,7 +2552,7 @@ int _ipa_init_rt6_v2(void)
         entry++;
     }

-    v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+    v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
     if (v6_cmd == NULL) {
         IPAERR("Failed to alloc v6 routing init command object\n");
         rc = -ENOMEM;
@@ -2582,6 +2588,7 @@ int _ipa_init_flt4_v2(void)
     struct ipa_desc desc = { 0 };
     struct ipa_mem_buffer mem;
     struct ipa_ip_v4_filter_init *v4_cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     u32 *entry;
     int i;
     int rc = 0;
@@ -2604,7 +2611,7 @@ int _ipa_init_flt4_v2(void)
         entry++;
     }

-    v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+    v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
     if (v4_cmd == NULL) {
         IPAERR("Failed to alloc v4 fliter init command object\n");
         rc = -ENOMEM;
@@ -2640,6 +2647,7 @@ int _ipa_init_flt6_v2(void)
     struct ipa_desc desc = { 0 };
     struct ipa_mem_buffer mem;
     struct ipa_ip_v6_filter_init *v6_cmd = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     u32 *entry;
     int i;
     int rc = 0;
@@ -2662,7 +2670,7 @@ int _ipa_init_flt6_v2(void)
         entry++;
     }

-    v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+    v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
     if (v6_cmd == NULL) {
         IPAERR("Failed to alloc v6 fliter init command object\n");
         rc = -ENOMEM;

File 2 of 4:

@@ -322,8 +322,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
         dma_address = desc->dma_address;
         tx_pkt->no_unmap_dma = true;
     }
-    if (!dma_address) {
-        IPAERR("failed to DMA wrap\n");
+    if (dma_mapping_error(ipa_ctx->pdev, dma_address)) {
+        IPAERR("dma_map_single failed\n");
         goto fail_dma_map;
     }
@@ -445,7 +445,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
         }
         dma_addr = dma_map_single(ipa_ctx->pdev,
                 transfer.iovec, size, DMA_TO_DEVICE);
-        if (!dma_addr) {
+        if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
             IPAERR("dma_map_single failed for sps xfr buff\n");
             kfree(transfer.iovec);
             return -EFAULT;
@@ -493,6 +493,15 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
                     tx_pkt->mem.base,
                     tx_pkt->mem.size,
                     DMA_TO_DEVICE);
+
+            if (dma_mapping_error(ipa_ctx->pdev,
+                    tx_pkt->mem.phys_base)) {
+                IPAERR("dma_map_single ");
+                IPAERR("failed\n");
+                fail_dma_wrap = 1;
+                goto failure;
+            }
+
         } else {
             tx_pkt->mem.phys_base = desc[i].dma_address;
             tx_pkt->no_unmap_dma = true;
@@ -1874,8 +1883,8 @@ begin:
         rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
                 sys->rx_buff_sz,
                 DMA_FROM_DEVICE);
-        if (rx_pkt->data.dma_addr == 0 ||
-            rx_pkt->data.dma_addr == ~0) {
+        if (dma_mapping_error(ipa_ctx->pdev,
+                rx_pkt->data.dma_addr)) {
             pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
                 __func__, (void *)rx_pkt->data.dma_addr,
                 ptr, sys);
@@ -2030,8 +2039,8 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size)
         ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
         rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
                 IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
-        if (rx_pkt->data.dma_addr == 0 ||
-            rx_pkt->data.dma_addr == ~0) {
+        if (dma_mapping_error(ipa_ctx->pdev,
+                rx_pkt->data.dma_addr)) {
             IPAERR("dma_map_single failure %p for %p\n",
                 (void *)rx_pkt->data.dma_addr, ptr);
             goto fail_dma_mapping;
@@ -2102,8 +2111,8 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
         rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
                 sys->rx_buff_sz,
                 DMA_FROM_DEVICE);
-        if (rx_pkt->data.dma_addr == 0 ||
-            rx_pkt->data.dma_addr == ~0) {
+        if (dma_mapping_error(ipa_ctx->pdev,
+                rx_pkt->data.dma_addr)) {
             IPAERR("dma_map_single failure %p for %p\n",
                 (void *)rx_pkt->data.dma_addr, ptr);
             goto fail_dma_mapping;
@@ -2160,9 +2169,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
         ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
         rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
             ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-        if (rx_pkt->data.dma_addr == 0 ||
-            rx_pkt->data.dma_addr == ~0)
+        if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) {
+            IPAERR("dma_map_single failure for rx_pkt\n");
             goto fail_dma_mapping;
+        }

         list_add_tail(&rx_pkt->link, &sys->head_desc_list);
         rx_len_cached = ++sys->len;

File 3 of 4:

@@ -268,6 +268,7 @@ int __ipa_commit_hdr_v2(void)
     struct ipa_mem_buffer mem;
     struct ipa_hdr_init_system *cmd = NULL;
     struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+    gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
    int rc = -EFAULT;

     if (ipa_generate_hdr_hw_tbl(&mem)) {
@@ -281,7 +282,7 @@ int __ipa_commit_hdr_v2(void)
             IPA_MEM_PART(apps_hdr_size));
         goto fail_send_cmd;
     } else {
-        dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
+        dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
         if (dma_cmd == NULL) {
             IPAERR("fail to alloc immediate cmd\n");
             rc = -ENOMEM;
@@ -303,7 +304,7 @@ int __ipa_commit_hdr_v2(void)
                 IPA_MEM_PART(apps_hdr_size_ddr));
             goto fail_send_cmd;
         } else {
-            cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+            cmd = kzalloc(sizeof(*cmd), flag);
             if (cmd == NULL) {
                 IPAERR("fail to alloc hdr init cmd\n");
                 rc = -ENOMEM;
@@ -359,6 +360,7 @@ int __ipa_commit_hdr_v2_5(void)
     struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
     struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
     struct ipa_register_write *reg_write_cmd = NULL;
+    gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     int rc = -EFAULT;
     u32 proc_ctx_size;
     u32 proc_ctx_ofst;
@@ -383,7 +385,7 @@ int __ipa_commit_hdr_v2_5(void)
             IPA_MEM_PART(apps_hdr_size));
         goto fail_send_cmd1;
     } else {
-        dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
+        dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag);
         if (dma_cmd_hdr == NULL) {
             IPAERR("fail to alloc immediate cmd\n");
             rc = -ENOMEM;
@@ -406,7 +408,7 @@ int __ipa_commit_hdr_v2_5(void)
             goto fail_send_cmd1;
         } else {
             hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
-                    GFP_ATOMIC);
+                    flag);
             if (hdr_init_cmd == NULL) {
                 IPAERR("fail to alloc immediate cmd\n");
                 rc = -ENOMEM;
@@ -431,7 +433,7 @@ int __ipa_commit_hdr_v2_5(void)
             goto fail_send_cmd1;
         } else {
             dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
-                    GFP_ATOMIC);
+                    flag);
             if (dma_cmd_ctx == NULL) {
                 IPAERR("fail to alloc immediate cmd\n");
                 rc = -ENOMEM;
@@ -456,7 +458,7 @@ int __ipa_commit_hdr_v2_5(void)
             goto fail_send_cmd1;
         } else {
             reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
-                    GFP_ATOMIC);
+                    flag);
             if (reg_write_cmd == NULL) {
                 IPAERR("fail to alloc immediate cmd\n");
                 rc = -ENOMEM;
@@ -722,6 +724,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
                 entry->hdr,
                 entry->hdr_len,
                 DMA_TO_DEVICE);
+            if (dma_mapping_error(ipa_ctx->pdev,
+                    entry->phys_base)) {
+                IPAERR("dma_map_single failure for entry\n");
+                goto fail_dma_mapping;
+            }
         }
     } else {
         entry->is_hdr_proc_ctx = false;
@@ -798,6 +805,8 @@ fail_add_proc_ctx:
     list_del(&entry->link);
     dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
             entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+    entry->is_hdr_proc_ctx = false;
 bad_hdr_len:
     entry->cookie = 0;
     kmem_cache_free(ipa_ctx->hdr_cache, entry);

File 4 of 4:

@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -698,6 +698,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
     struct ipa_mem_buffer head;
     struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
     struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
+    gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
     u16 avail;
     u32 num_modem_rt_index;
     int rc = 0;
@@ -748,7 +749,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
     }

     cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
-        GFP_KERNEL);
+        flag);
     if (cmd1 == NULL) {
         IPAERR("Failed to alloc immediate command object\n");
         rc = -ENOMEM;
@@ -765,7 +766,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
     if (lcl) {
         cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
-            GFP_KERNEL);
+            flag);
         if (cmd2 == NULL) {
             IPAERR("Failed to alloc immediate command object\n");
             rc = -ENOMEM;