msm: ipa: snapshot of IPA changes

This snapshot is taken as of msm-3.18 commit
d5809484b (Merge "msm: ipa: fix race condition
when teardown pipe" )

Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>
This commit is contained in:
Skylar Chang 2016-03-14 15:51:49 -07:00 committed by David Keitel
parent d2c1940363
commit f36ae7405a
60 changed files with 9319 additions and 4452 deletions

View file

@ -97,6 +97,17 @@ Optional properties:
- clock-names: This property shall contain the clock input names used
by driver in same order as the clocks property. This should be "iface_clk"
IPA SMP2P sub nodes
-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
ipa driver to modem.
-compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to
ipa driver from modem.
-gpios: Binding to the gpio defined in XXX-smp2p.dtsi
Example:
qcom,ipa@fd4c0000 {
@ -147,4 +158,15 @@ qcom,ipa@fd4c0000 {
qcom,descriptor-fifo-offset = <0xd00>;
qcom,descriptor-fifo-size = <0x300>;
};
/* smp2p gpio information */
qcom,smp2pgpio_map_ipa_1_out {
compatible = "qcom,smp2pgpio-map-ipa-1-out";
gpios = <&smp2pgpio_ipa_1_out 0 0>;
};
qcom,smp2pgpio_map_ipa_1_in {
compatible = "qcom,smp2pgpio-map-ipa-1-in";
gpios = <&smp2pgpio_ipa_1_in 0 0>;
};
};

View file

@ -8,6 +8,8 @@ Required properties:
Optional:
- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
Example:
qcom,rmnet-ipa {

View file

@ -50,6 +50,7 @@ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_ARCH_MSM) += msm/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/

View file

@ -28,6 +28,9 @@
#include <linux/rndis_ipa.h>
#include <linux/workqueue.h>
#define CREATE_TRACE_POINTS
#include "rndis_ipa_trace.h"
#define DRV_NAME "RNDIS_IPA"
#define DEBUGFS_DIR_NAME "rndis_ipa"
#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
@ -912,6 +915,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
}
skb = rndis_encapsulate_skb(skb);
trace_rndis_tx_dp(skb->protocol);
ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
if (ret) {
RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
@ -957,6 +961,8 @@ static void rndis_ipa_tx_complete_notify(void *private,
NULL_CHECK_NO_RETVAL(private);
trace_rndis_status_rcvd(skb->protocol);
RNDIS_IPA_DEBUG("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
skb->len, skb->protocol,
atomic_read(&rndis_ipa_ctx->outstanding_pkts));
@ -1121,6 +1127,7 @@ static void rndis_ipa_packet_receive_notify(void *private,
return;
}
trace_rndis_netif_ni(skb->protocol);
result = netif_rx_ni(skb);
if (result)
RNDIS_IPA_ERROR("fail on netif_rx_ni\n");

View file

@ -0,0 +1,81 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rndis_ipa
#define TRACE_INCLUDE_FILE rndis_ipa_trace

#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RNDIS_IPA_TRACE_H

#include <linux/tracepoint.h>

/*
 * All RNDIS-IPA tracepoints record a single value - the skb protocol -
 * so declare one event class and instantiate it per tracepoint instead
 * of repeating three identical TRACE_EVENT() bodies.
 */
DECLARE_EVENT_CLASS(
	rndis_ipa_proto_template,

	TP_PROTO(unsigned long proto),

	TP_ARGS(proto),

	TP_STRUCT__entry(
		__field(unsigned long,	proto)
	),

	TP_fast_assign(
		__entry->proto = proto;
	),

	TP_printk("proto =%lu\n", __entry->proto)
);

/* Rx path: packet about to be handed to netif_rx_ni() */
DEFINE_EVENT(rndis_ipa_proto_template, rndis_netif_ni,
	TP_PROTO(unsigned long proto),
	TP_ARGS(proto)
);

/* Tx path: packet about to be queued to IPA via ipa_tx_dp() */
DEFINE_EVENT(rndis_ipa_proto_template, rndis_tx_dp,
	TP_PROTO(unsigned long proto),
	TP_ARGS(proto)
);

/* Tx-complete/status notification received back from IPA */
DEFINE_EVENT(rndis_ipa_proto_template, rndis_status_rcvd,
	TP_PROTO(unsigned long proto),
	TP_ARGS(proto)
);

#endif /* _RNDIS_IPA_TRACE_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

View file

@ -1,5 +1,5 @@
obj-$(CONFIG_IPA) += ipa_v2/
obj-$(CONFIG_IPA3) += ipa_v3/
obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/
obj-$(CONFIG_IPA) += ipa_api.o
obj-$(CONFIG_IPA3) += ipa_api.o

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -408,7 +408,7 @@ int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
/**
* ipa_cfg_ep_hdr() - IPA end-point Control configuration
* ipa_cfg_ep_ctrl() - IPA end-point Control configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
*
@ -1442,6 +1442,22 @@ int ipa_uc_reg_rdyCB(
}
EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
/**
* ipa_uc_dereg_rdyCB() - To de-register uC ready CB
*
* Returns: 0 on success, negative on failure
*
*/
int ipa_uc_dereg_rdyCB(void)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB);
return ret;
}
EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
/**
* ipa_rm_create_resource() - create resource
* @create_params: [in] parameters needed
@ -2632,6 +2648,8 @@ static struct of_device_id ipa_plat_drv_match[] = {
{ .compatible = "qcom,ipa-smmu-ap-cb", },
{ .compatible = "qcom,ipa-smmu-wlan-cb", },
{ .compatible = "qcom,ipa-smmu-uc-cb", },
{ .compatible = "qcom,smp2pgpio-map-ipa-1-in", },
{ .compatible = "qcom,smp2pgpio-map-ipa-1-out", },
{}
};
@ -2639,21 +2657,29 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
{
int result;
pr_debug("ipa: IPA driver probing started\n");
/*
* The IPA probe function can be called multiple times, since the same
* probe function handles multiple compatible strings
*/
pr_debug("ipa: IPA driver probing started for %s\n",
pdev_p->dev.of_node->name);
ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
if (!ipa_api_ctrl)
return -ENOMEM;
if (!ipa_api_ctrl) {
ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
if (!ipa_api_ctrl)
return -ENOMEM;
/* Get IPA HW Version */
result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
&ipa_api_hw_type);
if ((result) || (ipa_api_hw_type == 0)) {
pr_err("ipa: get resource failed for ipa-hw-ver!\n");
result = -ENODEV;
goto fail;
/* Get IPA HW Version */
result = of_property_read_u32(pdev_p->dev.of_node,
"qcom,ipa-hw-ver", &ipa_api_hw_type);
if ((result) || (ipa_api_hw_type == 0)) {
pr_err("ipa: get resource failed for ipa-hw-ver!\n");
kfree(ipa_api_ctrl);
ipa_api_ctrl = 0;
return -ENODEV;
}
pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
}
pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
/* call probe based on IPA HW version */
switch (ipa_api_hw_type) {
@ -2663,30 +2689,20 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
case IPA_HW_v2_6L:
result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
if (result) {
pr_err("ipa: ipa_plat_drv_probe failed\n");
goto fail;
}
break;
case IPA_HW_v3_0:
case IPA_HW_v3_1:
result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
if (result) {
pr_err("ipa: ipa3_plat_drv_probe failed\n");
goto fail;
}
break;
default:
pr_err("ipa: unsupported version %d\n", ipa_api_hw_type);
result = -EPERM;
goto fail;
return -EPERM;
}
return 0;
fail:
kfree(ipa_api_ctrl);
ipa_api_ctrl = 0;
if (result && result != -EPROBE_DEFER)
pr_err("ipa: ipa_plat_drv_probe failed\n");
return result;
}
@ -2708,79 +2724,6 @@ static int ipa_ap_resume(struct device *dev)
return ret;
}
int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *),
void *user_data)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_init_teth_prot, teth_prot, teth_params,
ipa_usb_notify_cb, user_data);
return ret;
}
EXPORT_SYMBOL(ipa_usb_init_teth_prot);
int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_xdci_connect, ul_chan_params,
dl_chan_params, ul_out_params, dl_out_params, connect_params);
return ret;
}
EXPORT_SYMBOL(ipa_usb_xdci_connect);
int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_xdci_disconnect, ul_clnt_hdl,
dl_clnt_hdl, teth_prot);
return ret;
}
EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_deinit_teth_prot, teth_prot);
return ret;
}
EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_xdci_suspend, ul_clnt_hdl,
dl_clnt_hdl, teth_prot);
return ret;
}
EXPORT_SYMBOL(ipa_usb_xdci_suspend);
int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_usb_xdci_resume, ul_clnt_hdl, dl_clnt_hdl);
return ret;
}
EXPORT_SYMBOL(ipa_usb_xdci_resume);
int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
void *user_data)
{

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -181,6 +181,8 @@ struct ipa_api_controller {
int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
int (*ipa_uc_dereg_rdyCB)(void);
int (*ipa_rm_create_resource)(
struct ipa_rm_create_params *create_params);
@ -331,28 +333,6 @@ struct ipa_api_controller {
struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx);
int (*ipa_usb_init_teth_prot)(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void*),
void *user_data);
int (*ipa_usb_xdci_connect)(
struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params);
int (*ipa_usb_xdci_disconnect)(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
int (*ipa_usb_deinit_teth_prot)(enum ipa_usb_teth_prot teth_prot);
int (*ipa_usb_xdci_suspend)(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
int (*ipa_usb_xdci_resume)(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
void *user_data);

View file

@ -0,0 +1 @@
obj-$(CONFIG_IPA3) += ipa_usb.o

View file

@ -32,9 +32,14 @@
#include <linux/delay.h>
#include <linux/qcom_iommu.h>
#include <linux/time.h>
#include <linux/hashtable.h>
#include <linux/hash.h>
#include "ipa_i.h"
#include "ipa_rm_i.h"
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"
#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
#define IPA_PIPE_MEM_SIZE (0x0)
@ -53,6 +58,13 @@
#define CLEANUP_TAG_PROCESS_TIMEOUT 150
#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
#define IPA_AGGR_STR_IN_BYTES(str) \
(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
@ -192,6 +204,8 @@ static bool smmu_present;
static bool arm_smmu;
static bool smmu_disable_htw;
static char *active_clients_table_buf;
const char *ipa2_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_HSIC1_PROD),
__stringify(IPA_CLIENT_WLAN1_PROD),
@ -264,23 +278,107 @@ const char *ipa2_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_TEST4_CONS),
};
/**
 * ipa2_active_clients_log_print_buffer() - dump the circular history log
 * @buf: destination buffer
 * @size: capacity of @buf in bytes
 *
 * Walks the circular log from the line after the tail up to (excluding)
 * the head, appending one line per entry. Caller must hold the active
 * clients lock. Returns the number of bytes written to @buf.
 */
int ipa2_active_clients_log_print_buffer(char *buf, int size)
{
	int pos = 0;
	const int last = ipa_ctx->ipa2_active_clients_logging.log_head;
	int idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;

	while (idx != last) {
		pos += scnprintf(buf + pos, size - pos, "%s\n",
				ipa_ctx->ipa2_active_clients_logging
				.log_buffer[idx]);
		idx = (idx + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
	}

	return pos;
}
/**
 * ipa2_active_clients_log_print_table() - dump the per-client hash table
 * @buf: destination buffer
 * @size: capacity of @buf in bytes
 *
 * Prints one line per hash table entry (id, reference count and entry
 * type) followed by the total active clients count. Caller must hold the
 * active clients lock. Returns the number of bytes written to @buf.
 */
int ipa2_active_clients_log_print_table(char *buf, int size)
{
	struct ipa2_active_client_htable_entry *entry;
	int pos;
	int bkt;

	pos = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
	hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, bkt,
			entry, list) {
		switch (entry->type) {
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP:
			pos += scnprintf(buf + pos, size - pos,
					"%-40s %-3d ENDPOINT\n",
					entry->id_string, entry->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
			pos += scnprintf(buf + pos, size - pos,
					"%-40s %-3d SIMPLE\n",
					entry->id_string, entry->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
			pos += scnprintf(buf + pos, size - pos,
					"%-40s %-3d RESOURCE\n",
					entry->id_string, entry->count);
			break;
		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
			pos += scnprintf(buf + pos, size - pos,
					"%-40s %-3d SPECIAL\n",
					entry->id_string, entry->count);
			break;
		default:
			IPAERR("Trying to print illegal active_clients type");
			break;
		}
	}
	pos += scnprintf(buf + pos, size - pos,
			"\nTotal active clients count: %d\n",
			ipa_ctx->ipa_active_clients.cnt);

	return pos;
}
/*
 * Panic notifier: on kernel panic, dump the active clients table into the
 * pre-allocated buffer and emit it through IPAERR so the reference-count
 * state is captured in the panic log. Takes the active clients lock for
 * the duration of the dump.
 */
static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ipa_active_clients_lock();
	ipa2_active_clients_log_print_table(active_clients_table_buf,
			IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
	IPAERR("%s", active_clients_table_buf);
	ipa_active_clients_unlock();

	return NOTIFY_DONE;
}

/* Registered on panic_notifier_list during active-clients log init */
static struct notifier_block ipa2_active_clients_panic_blk = {
	.notifier_call = ipa2_active_clients_panic_notifier,
};
static int ipa2_active_clients_log_insert(const char *string)
{
int head;
int tail;
head = ipa_ctx->ipa2_active_clients_logging.log_head;
tail = ipa_ctx->ipa2_active_clients_logging.log_tail;
if (!ipa_ctx->ipa2_active_clients_logging.log_rdy)
return -EPERM;
strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer
[ipa_ctx->ipa2_active_clients_logging.log_head],
string,
memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_',
IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string,
(size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
ipa_ctx->ipa2_active_clients_logging.log_head =
(ipa_ctx->ipa2_active_clients_logging.log_head + 1) %
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
if (ipa_ctx->ipa2_active_clients_logging.log_tail ==
ipa_ctx->ipa2_active_clients_logging.log_head) {
ipa_ctx->ipa2_active_clients_logging.log_tail =
(ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
}
head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
if (tail == head)
tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
ipa_ctx->ipa2_active_clients_logging.log_tail = tail;
ipa_ctx->ipa2_active_clients_logging.log_head = head;
return 0;
}
@ -292,6 +390,8 @@ static int ipa2_active_clients_log_init(void)
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]),
GFP_KERNEL);
active_clients_table_buf = kzalloc(sizeof(
char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
if (ipa_ctx->ipa2_active_clients_logging.log_buffer == NULL) {
IPAERR("Active Clients Logging memory allocation failed");
goto bail;
@ -304,6 +404,9 @@ static int ipa2_active_clients_log_init(void)
ipa_ctx->ipa2_active_clients_logging.log_head = 0;
ipa_ctx->ipa2_active_clients_logging.log_tail =
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
hash_init(ipa_ctx->ipa2_active_clients_logging.htable);
atomic_notifier_chain_register(&panic_notifier_list,
&ipa2_active_clients_panic_blk);
ipa_ctx->ipa2_active_clients_logging.log_rdy = 1;
return 0;
@ -330,22 +433,6 @@ static void ipa2_active_clients_log_destroy(void)
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
}
void ipa2_active_clients_log_print_buffer(void)
{
int i;
ipa_active_clients_lock();
for (i = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
i != ipa_ctx->ipa2_active_clients_logging.log_head;
i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
pr_err("%s\n", ipa_ctx->ipa2_active_clients_logging
.log_buffer[i]);
}
ipa_active_clients_unlock();
}
enum ipa_smmu_cb_type {
IPA_SMMU_CB_AP,
IPA_SMMU_CB_WLAN,
@ -2527,7 +2614,7 @@ static void ipa_teardown_apps_pipes(void)
}
#ifdef CONFIG_COMPAT
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int retval = 0;
struct ipa_ioc_nat_alloc_mem32 nat_mem32;
@ -2668,7 +2755,7 @@ static const struct file_operations ipa_drv_fops = {
.read = ipa_read,
.unlocked_ioctl = ipa_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ipa3_ioctl,
.compat_ioctl = compat_ipa_ioctl,
#endif
};
@ -2919,11 +3006,102 @@ static void ipa_start_tag_process(struct work_struct *work)
if (res)
IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
IPADBG("TAG process done\n");
}
/**
 * ipa2_active_clients_log_mod() - Log a modification in the active clients
 * reference count
 *
 * This method logs any modification in the active clients reference count:
 * It logs the modification in the circular history buffer
 * It logs the modification in the hash table - looking for an entry,
 * creating one if needed and deleting one if needed.
 *
 * @id: ipa2_active client logging info struct to hold the log information
 * @inc: a boolean variable to indicate whether the modification is an increase
 * or decrease
 * @int_ctx: a boolean variable to indicate whether this call is being made from
 * an interrupt context and therefore should allocate GFP_ATOMIC memory
 *
 * Method process:
 * - Hash the unique identifier string
 * - Find the hash in the table
 *    1)If found, increase or decrease the reference count
 *    2)If not found, allocate a new hash table entry struct and initialize it
 * - Remove and deallocate unneeded data structure
 * - Log the call in the circular history buffer (unless it is a simple call)
 */
void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id,
		bool inc, bool int_ctx)
{
	char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
	unsigned long long t;
	unsigned long nanosec_rem;
	struct ipa2_active_client_htable_entry *hentry;
	struct ipa2_active_client_htable_entry *hfound;
	u32 hkey;
	char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];

	hfound = NULL;
	/* Hash a fixed-size, zero-padded copy of the id so that bytes past
	 * the terminator of shorter id strings cannot change the key.
	 */
	memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
	strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
	hkey = arch_fast_hash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
			0);
	/* Walk the collision chain for this key; a match is decided by the
	 * full id string, not just the hash.
	 */
	hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
			hentry, list, hkey) {
		if (!strcmp(hentry->id_string, id->id_string)) {
			hentry->count = hentry->count + (inc ? 1 : -1);
			hfound = hentry;
		}
	}
	if (hfound == NULL) {
		/* First reference under this id - create a table entry.
		 * GFP_ATOMIC when the caller runs in interrupt context.
		 */
		hentry = NULL;
		hentry = kzalloc(sizeof(
				struct ipa2_active_client_htable_entry),
				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
		if (hentry == NULL) {
			IPAERR("failed allocating active clients hash entry");
			return;
		}
		hentry->type = id->type;
		strlcpy(hentry->id_string, id->id_string,
				IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
		INIT_HLIST_NODE(&hentry->list);
		hentry->count = inc ? 1 : -1;
		hash_add(ipa_ctx->ipa2_active_clients_logging.htable,
				&hentry->list, hkey);
	} else if (hfound->count == 0) {
		/* Reference count returned to zero - drop the entry */
		hash_del(&hfound->list);
		kfree(hfound);
	}

	/* SIMPLE operations are too frequent to record in the circular
	 * history buffer; everything else is timestamped and inserted.
	 */
	if (id->type != SIMPLE) {
		t = local_clock();
		nanosec_rem = do_div(t, 1000000000) / 1000;
		snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
						"[%5lu.%06lu] v %s, %s: %d",
				(unsigned long)t, nanosec_rem,
				id->id_string, id->file, id->line);
		ipa2_active_clients_log_insert(temp_str);
	}
}
/* Log a reference-count decrease; @int_ctx selects GFP_ATOMIC allocation */
void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
		bool int_ctx)
{
	ipa2_active_clients_log_mod(id, false, int_ctx);
}

/* Log a reference-count increase; @int_ctx selects GFP_ATOMIC allocation */
void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
		bool int_ctx)
{
	ipa2_active_clients_log_mod(id, true, int_ctx);
}
/**
* ipa_inc_client_enable_clks() - Increase active clients counter, and
* enable ipa clocks if necessary
@ -2936,20 +3114,8 @@ static void ipa_start_tag_process(struct work_struct *work)
*/
void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id)
{
char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
unsigned long long t;
unsigned long nanosec_rem;
ipa_active_clients_lock();
if (id->type != SIMPLE) {
t = cpu_clock(smp_processor_id());
nanosec_rem = do_div(t, 1000000000) / 1000;
snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
"[%5lu.%06lu] ^ %s, %s: %d",
(unsigned long)t, nanosec_rem,
id->id_string, id->file, id->line);
ipa2_active_clients_log_insert(temp_str);
}
ipa2_active_clients_log_inc(id, false);
ipa_ctx->ipa_active_clients.cnt++;
if (ipa_ctx->ipa_active_clients.cnt == 1)
ipa_enable_clks();
@ -2973,9 +3139,6 @@ int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
{
int res = 0;
unsigned long flags;
char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
unsigned long long t;
unsigned long nanosec_rem;
if (ipa_active_clients_trylock(&flags) == 0)
return -EPERM;
@ -2985,15 +3148,7 @@ int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
goto bail;
}
if (id->type != SIMPLE) {
t = cpu_clock(smp_processor_id());
nanosec_rem = do_div(t, 1000000000) / 1000;
snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
"[%5lu.%06lu] ^ %s, %s: %d",
(unsigned long)t, nanosec_rem,
id->id_string, id->file, id->line);
ipa2_active_clients_log_insert(temp_str);
}
ipa2_active_clients_log_inc(id, true);
ipa_ctx->ipa_active_clients.cnt++;
IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
@ -3019,24 +3174,17 @@ bail:
*/
void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
{
char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
unsigned long long t;
unsigned long nanosec_rem;
struct ipa2_active_client_logging_info log_info;
ipa_active_clients_lock();
if (id->type != SIMPLE) {
t = cpu_clock(smp_processor_id());
nanosec_rem = do_div(t, 1000000000) / 1000;
snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
"[%5lu.%06lu] v %s, %s: %d",
(unsigned long)t, nanosec_rem,
id->id_string, id->file, id->line);
ipa2_active_clients_log_insert(temp_str);
}
ipa2_active_clients_log_dec(id, false);
ipa_ctx->ipa_active_clients.cnt--;
IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
if (ipa_ctx->ipa_active_clients.cnt == 0) {
if (ipa_ctx->tag_process_before_gating) {
IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
"TAG_PROCESS");
ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->tag_process_before_gating = false;
/*
* When TAG process ends, active clients will be
@ -3058,15 +3206,21 @@ void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
* Return codes:
* None
*/
void ipa_inc_acquire_wakelock(void)
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
{
unsigned long flags;
if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
return;
spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
ipa_ctx->wakelock_ref_cnt.cnt++;
if (ipa_ctx->wakelock_ref_cnt.cnt == 1)
if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
IPAERR("client enum %d mask already set. ref cnt = %d\n",
ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
if (ipa_ctx->wakelock_ref_cnt.cnt)
__pm_stay_awake(&ipa_ctx->w_lock);
IPADBG("active wakelock ref cnt = %d\n", ipa_ctx->wakelock_ref_cnt.cnt);
IPADBG("active wakelock ref cnt = %d client enum %d\n",
ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
}
@ -3078,13 +3232,16 @@ void ipa_inc_acquire_wakelock(void)
* Return codes:
* None
*/
void ipa_dec_release_wakelock(void)
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
{
unsigned long flags;
if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
return;
spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
ipa_ctx->wakelock_ref_cnt.cnt--;
IPADBG("active wakelock ref cnt = %d\n", ipa_ctx->wakelock_ref_cnt.cnt);
ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
IPADBG("active wakelock ref cnt = %d client enum %d\n",
ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
__pm_relax(&ipa_ctx->w_lock);
spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
@ -3461,6 +3618,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
struct sps_bam_props bam_props = { 0 };
struct ipa_flt_tbl *flt_tbl;
struct ipa_rt_tbl_set *rset;
struct ipa2_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@ -3584,6 +3742,8 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
mutex_init(&ipa_ctx->ipa_active_clients.mutex);
spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->ipa_active_clients.cnt = 1;
/* Create workqueues for power management */
@ -3617,6 +3777,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
if (ipa_ctx->smmu_present)
bam_props.options |= SPS_BAM_SMMU_EN;
bam_props.options |= SPS_BAM_CACHED_WP;
bam_props.ee = resource_p->ee;
bam_props.ipc_loglevel = 3;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -407,7 +407,8 @@ int ipa2_connect(const struct ipa_connect_params *in,
}
if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_5) &&
ipa_ctx->ipa_hw_type == IPA_HW_v2_5 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) &&
IPA_CLIENT_IS_USB_CONS(in->client))
ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
else
@ -539,6 +540,7 @@ int ipa2_disconnect(u32 clnt_hdl)
struct iommu_domain *smmu_domain;
struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
int res;
enum ipa_client_type client_type;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@ -552,10 +554,9 @@ int ipa2_disconnect(u32 clnt_hdl)
}
ep = &ipa_ctx->ep[clnt_hdl];
client_type = ipa2_get_client_mapping(clnt_hdl);
if (!ep->keep_ipa_awake)
IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
IPA2_ACTIVE_CLIENTS_INC_EP(client_type);
/* Set Disconnect in Progress flag. */
spin_lock(&ipa_ctx->disconnect_lock);
@ -662,7 +663,7 @@ int ipa2_disconnect(u32 clnt_hdl)
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
spin_unlock(&ipa_ctx->disconnect_lock);
IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPA2_ACTIVE_CLIENTS_DEC_EP(client_type);
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

View file

@ -21,6 +21,9 @@
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_CNTR_ON 127265
#define IPA_DBG_CNTR_OFF 127264
#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \
* IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \
+ IPA_MAX_MSG_LEN)
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
@ -108,6 +111,7 @@ static struct dentry *dfile_rm_stats;
static struct dentry *dfile_status_stats;
static struct dentry *dfile_active_clients;
static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
static s8 ep_reg_idx;
int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
@ -1552,9 +1556,23 @@ static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
static ssize_t ipa2_print_active_clients_log(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
ipa2_active_clients_log_print_buffer();
int cnt;
int table_size;
return 0;
if (active_clients_buf == NULL) {
IPAERR("Active Clients buffer is not allocated");
return 0;
}
memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
ipa_active_clients_lock();
cnt = ipa2_active_clients_log_print_buffer(active_clients_buf,
IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN);
table_size = ipa2_active_clients_log_print_table(active_clients_buf
+ cnt, IPA_MAX_MSG_LEN);
ipa_active_clients_unlock();
return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
cnt + table_size);
}
static ssize_t ipa2_clear_active_clients_log(struct file *file,
@ -1682,13 +1700,19 @@ void ipa_debugfs_init(void)
goto fail;
}
dfile_ep_reg = debugfs_create_file("active_clients",
dfile_active_clients = debugfs_create_file("active_clients",
read_write_mode, dent, 0, &ipa2_active_clients);
if (!dfile_ep_reg || IS_ERR(dfile_active_clients)) {
IPAERR("fail to create file for debug_fs ep_reg\n");
if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
IPAERR("fail to create file for debug_fs active_clients\n");
goto fail;
}
active_clients_buf = NULL;
active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
GFP_KERNEL);
if (active_clients_buf == NULL)
IPAERR("fail to allocate active clients memory buffer");
dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
&ipa_ep_reg_ops);
if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
@ -1843,6 +1867,10 @@ void ipa_debugfs_remove(void)
IPAERR("ipa_debugfs_remove: folder was not created.\n");
return;
}
if (active_clients_buf != NULL) {
kfree(active_clients_buf);
active_clients_buf = NULL;
}
debugfs_remove_recursive(dent);
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include "ipa_i.h"
#include "ipa_trace.h"
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
@ -241,7 +242,6 @@ static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
}
atomic_set(&sys->curr_polling_state, 0);
ipa_handle_tx_core(sys, true, false);
ipa_dec_release_wakelock();
return;
fail:
@ -725,7 +725,6 @@ static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
IPAERR("sps_set_config() failed %d\n", ret);
break;
}
ipa_inc_acquire_wakelock();
atomic_set(&sys->curr_polling_state, 1);
queue_work(sys->wq, &sys->work);
}
@ -841,7 +840,7 @@ static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
}
atomic_set(&sys->curr_polling_state, 0);
ipa_handle_rx_core(sys, true, false);
ipa_dec_release_wakelock();
ipa_dec_release_wakelock(sys->ep->wakelock_client);
return;
fail:
@ -857,6 +856,16 @@ static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
{
int ret;
/*
* Do not change sps config in case we are in polling mode as this
* indicates that sps driver already notified EOT event and sps config
* should not change until ipa driver processes the packet.
*/
if (atomic_read(&sys->curr_polling_state)) {
IPADBG("in polling mode, do not change config\n");
return;
}
if (enable) {
ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
@ -959,8 +968,9 @@ static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
IPAERR("sps_set_config() failed %d\n", ret);
break;
}
ipa_inc_acquire_wakelock();
ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
atomic_set(&sys->curr_polling_state, 1);
trace_intr_to_poll(sys->ep->client);
queue_work(sys->wq, &sys->work);
}
break;
@ -997,8 +1007,10 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
cnt = ipa_handle_rx_core(sys, true, true);
if (cnt == 0) {
inactive_cycles++;
trace_idle_sleep_enter(sys->ep->client);
usleep_range(POLLING_MIN_SLEEP_RX,
POLLING_MAX_SLEEP_RX);
trace_idle_sleep_exit(sys->ep->client);
} else {
inactive_cycles = 0;
}
@ -1012,6 +1024,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr(sys->ep->client);
ipa_rx_switch_to_intr_mode(sys);
IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
}
@ -1309,14 +1322,6 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
if (IPA_CLIENT_IS_CONS(sys_in->client))
ipa_replenish_rx_cache(ep->sys);
if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
}
if (nr_cpu_ids > 1 &&
(sys_in->client == IPA_CLIENT_APPS_LAN_CONS ||
sys_in->client == IPA_CLIENT_APPS_WAN_CONS)) {
@ -1334,6 +1339,14 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
}
if (IPA_CLIENT_IS_CONS(sys_in->client))
ipa_replenish_rx_cache(ep->sys);
if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
}
ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
if (ipa_ctx->modem_cfg_emb_pipe_flt &&
@ -1414,6 +1427,8 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
} while (1);
}
if (IPA_CLIENT_IS_CONS(ep->client))
cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
sps_disconnect(ep->ep_hdl);
dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
@ -2858,6 +2873,7 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
}
} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
sys->ep->status.status_en = true;
sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
if (IPA_CLIENT_IS_PROD(in->client)) {
if (!sys->ep->skip_ep_cfg) {
sys->policy = IPA_POLICY_NOINTR_MODE;
@ -2905,11 +2921,15 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
IPA_GENERIC_AGGR_BYTE_LIMIT;
in->ipa_ep_cfg.aggr.aggr_pkt_limit =
IPA_GENERIC_AGGR_PKT_LIMIT;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_LAN_RX;
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
sys->rx_pool_sz =
ipa_ctx->wan_rx_ring_size;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
if (ipa_ctx->
ipa_client_apps_wan_cons_agg_gro) {
IPAERR("get close-by %u\n",
@ -2980,9 +3000,12 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
sys->pyld_hdlr = NULL;
sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
sys->get_skb = ipa_get_skb_ipa_rx;
sys->free_skb = ipa_free_skb_rx;
in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
IPADBG("assigning policy to client:%d",
in->client);
@ -3007,6 +3030,8 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
sys->get_skb = ipa_get_skb_ipa_rx;
sys->free_skb = ipa_free_skb_rx;
sys->repl_hdlr = ipa_replenish_rx_cache;
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_ODU_RX;
} else if (in->client ==
IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
IPADBG("assigning policy to client:%d",

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -235,7 +235,9 @@
} while (0)
#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 100
#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
extern const char *ipa2_clients_strings[];
@ -254,11 +256,19 @@ struct ipa2_active_client_logging_info {
enum ipa2_active_client_log_type type;
};
struct ipa2_active_client_htable_entry {
struct hlist_node list;
char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
int count;
enum ipa2_active_client_log_type type;
};
struct ipa2_active_clients_log_ctx {
char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
int log_head;
int log_tail;
bool log_rdy;
struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
};
@ -596,6 +606,16 @@ struct ipa_status_stats {
int curr;
};
enum ipa_wakelock_ref_client {
IPA_WAKELOCK_REF_CLIENT_TX = 0,
IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1,
IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2,
IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3,
IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4,
IPA_WAKELOCK_REF_CLIENT_SPS = 5,
IPA_WAKELOCK_REF_CLIENT_MAX
};
/**
* struct ipa_ep_context - IPA end point context
* @valid: flag indicating id EP context is valid
@ -655,6 +675,7 @@ struct ipa_ep_context {
u32 rx_replenish_threshold;
bool disconnect_in_progress;
u32 qmi_request_sent;
enum ipa_wakelock_ref_client wakelock_client;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@ -905,7 +926,7 @@ struct ipa_active_clients {
struct ipa_wakelock_ref_cnt {
spinlock_t spinlock;
int cnt;
u32 cnt;
};
struct ipa_tag_completion {
@ -1753,6 +1774,10 @@ int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
/*
* To de-register uC ready callback
*/
int ipa2_uc_dereg_rdyCB(void);
/*
* Resource manager
@ -1945,7 +1970,12 @@ void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id);
int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
*id);
void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id);
void ipa2_active_clients_log_print_buffer(void);
void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
bool int_ctx);
void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
bool int_ctx);
int ipa2_active_clients_log_print_buffer(char *buf, int size);
int ipa2_active_clients_log_print_table(char *buf, int size);
void ipa2_active_clients_log_clear(void);
int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
int __ipa_del_rt_rule(u32 rule_hdl);
@ -2118,7 +2148,7 @@ void ipa_flow_control(enum ipa_client_type ipa_client, bool enable,
uint32_t qmap_id);
int ipa2_restore_suspend_handler(void);
void ipa_sps_irq_control_all(bool enable);
void ipa_inc_acquire_wakelock(void);
void ipa_dec_release_wakelock(void);
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
#endif /* _IPA_I_H_ */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -20,6 +20,7 @@
static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_Q6_PROD),
__stringify(IPA_RM_RESOURCE_USB_PROD),
__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
__stringify(IPA_RM_RESOURCE_HSIC_PROD),
__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
@ -29,6 +30,7 @@ static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_MHI_PROD),
__stringify(IPA_RM_RESOURCE_Q6_CONS),
__stringify(IPA_RM_RESOURCE_USB_CONS),
__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
__stringify(IPA_RM_RESOURCE_HSIC_CONS),
__stringify(IPA_RM_RESOURCE_WLAN_CONS),
__stringify(IPA_RM_RESOURCE_APPS_CONS),

View file

@ -0,0 +1,135 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ipa
#define TRACE_INCLUDE_FILE ipa_trace
#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _IPA_TRACE_H
#include <linux/tracepoint.h>
TRACE_EVENT(
intr_to_poll,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
poll_to_intr,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_enter,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_exit,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
rmnet_ipa_netifni,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
TRACE_EVENT(
rmnet_ipa_netifrx,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -732,7 +732,7 @@ int ipa_uc_reset_pipe(enum ipa_client_type ipa_client)
IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
true, 10*HZ);
false, 10*HZ);
return ret;
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1450,6 +1450,20 @@ int ipa2_uc_reg_rdyCB(
return 0;
}
/**
* ipa2_uc_dereg_rdyCB() - To de-register uC ready CB
*
* Returns: 0 on success, negative on failure
*
*/
int ipa2_uc_dereg_rdyCB(void)
{
ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
ipa_ctx->uc_wdi_ctx.priv = NULL;
return 0;
}
/**
* ipa2_uc_wdi_get_dbpa() - To retrieve
@ -1519,9 +1533,16 @@ static void ipa_uc_wdi_loaded_handler(void)
return;
}
if (ipa_ctx->uc_wdi_ctx.uc_ready_cb)
if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) {
ipa_ctx->uc_wdi_ctx.uc_ready_cb(
ipa_ctx->uc_wdi_ctx.priv);
ipa_ctx->uc_wdi_ctx.uc_ready_cb =
NULL;
ipa_ctx->uc_wdi_ctx.priv = NULL;
}
return;
}
int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -569,6 +569,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
unsigned long flags;
struct ipa2_active_client_logging_info log_info;
if (ipa_active_clients_trylock(&flags) == 0)
return -EPERM;
@ -606,6 +607,9 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
}
if (res == 0) {
IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa_rm_resource_str(resource));
ipa2_active_clients_log_dec(&log_info, true);
ipa_ctx->ipa_active_clients.cnt--;
IPADBG("active clients = %d\n",
ipa_ctx->ipa_active_clients.cnt);
@ -5004,6 +5008,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
api_ctrl->ipa_rm_create_resource = ipa2_rm_create_resource;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -34,6 +34,8 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
#include "ipa_trace.h"
#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
#define WWAN_DATA_LEN 2000
@ -88,6 +90,7 @@ enum wwan_device_status {
struct ipa_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
bool ipa_advertise_sg_support;
};
/**
@ -1147,10 +1150,13 @@ static void apps_ipa_packet_receive_notify(void *priv,
skb->dev = ipa_netdevs[0];
skb->protocol = htons(ETH_P_MAP);
if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0)
if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
trace_rmnet_ipa_netifni(dev->stats.rx_packets);
result = netif_rx_ni(skb);
else
} else {
trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
result = netif_rx(skb);
}
if (result) {
pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
@ -1161,6 +1167,8 @@ static void apps_ipa_packet_receive_notify(void *priv,
dev->stats.rx_bytes += packet_len;
}
static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
/**
* ipa_wwan_ioctl() - I/O control for wwan network driver.
*
@ -1289,6 +1297,15 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct rmnet_ioctl_extended_s)))
rc = -EFAULT;
break;
/* GET SG support */
case RMNET_IOCTL_GET_SG_SUPPORT:
extend_ioctl_data.u.data =
ipa_rmnet_res.ipa_advertise_sg_support;
if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
&extend_ioctl_data,
sizeof(struct rmnet_ioctl_extended_s)))
rc = -EFAULT;
break;
/* Get endpoint ID */
case RMNET_IOCTL_GET_EPID:
IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
@ -1349,6 +1366,11 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
rmnet_mux_val.mux_id);
return rc;
}
if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
IPAWANERR("Exceed mux_channel limit(%d)\n",
rmnet_index);
return -EFAULT;
}
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
@ -1391,7 +1413,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
cs_offload_en = 1;
cs_offload_en =
IPA_ENABLE_CS_OFFLOAD_UL;
apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
cs_metadata_hdr_offset = 1;
} else {
@ -1449,7 +1472,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
cs_offload_en = 2;
cs_offload_en =
IPA_ENABLE_CS_OFFLOAD_DL;
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
@ -1823,8 +1847,6 @@ static struct notifier_block ssr_notifier = {
.notifier_call = ssr_notifier_cb,
};
static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
@ -1838,6 +1860,12 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-loaduC");
pr_info("IPA ipa-loaduC = %s\n",
ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
ipa_rmnet_drv_res->ipa_advertise_sg_support =
of_property_read_bool(pdev->dev.of_node,
"qcom,ipa-advertise-sg-support");
pr_info("IPA SG support = %s\n",
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
return 0;
}
@ -2078,8 +2106,6 @@ static int ipa_wwan_remove(struct platform_device *pdev)
ipa_del_mux_qmap_hdrs();
if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
wwan_del_ul_flt_rule_to_ipa();
/* clean up cached QMI msg/handlers */
ipa_qmi_service_exit();
ipa_cleanup_deregister_intf();
atomic_set(&is_initialized, 0);
pr_info("rmnet_ipa completed deinitialization\n");
@ -2207,6 +2233,9 @@ static int ssr_notifier_cb(struct notifier_block *this,
}
if (SUBSYS_BEFORE_POWERUP == code) {
pr_info("IPA received MPSS BEFORE_POWERUP\n");
if (atomic_read(&is_ssr))
/* clean up cached QMI msg/handlers */
ipa_qmi_service_exit();
ipa2_proxy_clk_vote();
pr_info("IPA BEFORE_POWERUP handling is complete\n");
return NOTIFY_DONE;
@ -2469,20 +2498,20 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
{
struct ipa_get_data_stats_req_msg_v01 *req;
struct ipa_get_data_stats_resp_msg_v01 *resp;
int pipe_len, rc = -ENOMEM;
int pipe_len, rc;
req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
GFP_KERNEL);
if (!req) {
IPAWANERR("Can't allocate memory for stats message\n");
return rc;
IPAWANERR("failed to allocate memory for stats message\n");
return -ENOMEM;
}
resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
GFP_KERNEL);
if (!resp) {
IPAWANERR("Can't allocate memory for stats message\n");
IPAWANERR("failed to allocate memory for stats message\n");
kfree(req);
return rc;
return -ENOMEM;
}
memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));

View file

@ -1,7 +1,9 @@
obj-$(CONFIG_IPA3) += ipahal/
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \
ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_usb.o
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -29,13 +29,14 @@
#define IPA_POLL_FOR_EMPTINESS_NUM 50
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
#define IPA_POLL_FOR_CHANNEL_STOP_NUM 10
#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
/* xfer_rsc_idx should be 7 bits */
#define IPA_XFER_RSC_IDX_MAX 127
static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
struct gsi_chan_info *chan_info);
static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
bool *is_empty);
int ipa3_enable_data_path(u32 clnt_hdl)
{
@ -43,7 +44,7 @@ int ipa3_enable_data_path(u32 clnt_hdl)
struct ipa_ep_cfg_holb holb_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
int res = 0;
u32 reg_val = 0;
struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
IPADBG("Enabling data path\n");
if (IPA_CLIENT_IS_CONS(ep->client)) {
@ -63,20 +64,19 @@ int ipa3_enable_data_path(u32 clnt_hdl)
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
/* Assign the resource group for pipe*/
if (ipa_get_ep_group(ep->client) == -1) {
/* Assign the resource group for pipe */
memset(&rsrc_grp, 0, sizeof(rsrc_grp));
rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
if (rsrc_grp.rsrc_grp == -1) {
IPAERR("invalid group for client %d\n", ep->client);
WARN_ON(1);
return -EFAULT;
}
IPADBG("Setting group %d for pipe %d\n",
ipa_get_ep_group(ep->client), clnt_hdl);
IPA_SETFIELD_IN_REG(reg_val, ipa_get_ep_group(ep->client),
IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
ipa_write_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_RSRC_GRP_n(clnt_hdl), reg_val);
rsrc_grp.rsrc_grp, clnt_hdl);
ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
&rsrc_grp);
return res;
}
@ -86,7 +86,7 @@ int ipa3_disable_data_path(u32 clnt_hdl)
struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
struct ipa_ep_cfg_holb holb_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
u32 aggr_init;
struct ipa_ep_cfg_aggr ep_aggr;
int res = 0;
IPADBG("Disabling data path\n");
@ -105,10 +105,8 @@ int ipa3_disable_data_path(u32 clnt_hdl)
}
udelay(IPA_PKT_FLUSH_TO_US);
aggr_init = ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_AGGR_N_OFST_v3_0(clnt_hdl));
if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) {
ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
if (ep_aggr.aggr_en) {
res = ipa3_tag_aggr_force_close(clnt_hdl);
if (res) {
IPAERR("tag process timeout, client:%d err:%d\n",
@ -283,7 +281,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
int ipa_ep_idx;
int result = -EFAULT;
struct ipa3_ep_context *ep;
struct ipa3_ep_cfg_status ep_status;
struct ipahal_reg_ep_cfg_status ep_status;
unsigned long base;
struct iommu_domain *smmu_domain;
@ -310,7 +308,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(in->client);
ep->skip_ep_cfg = in->skip_ep_cfg;
ep->valid = 1;
@ -443,7 +441,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
@ -497,7 +495,7 @@ desc_mem_alloc_fail:
sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
fail:
return result;
}
@ -551,6 +549,7 @@ int ipa3_disconnect(u32 clnt_hdl)
struct iommu_domain *smmu_domain;
struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
int res;
enum ipa_client_type client_type;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@ -559,9 +558,9 @@ int ipa3_disconnect(u32 clnt_hdl)
}
ep = &ipa3_ctx->ep[clnt_hdl];
client_type = ipa3_get_client_mapping(clnt_hdl);
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(client_type);
/* Set Disconnect in Progress flag. */
spin_lock(&ipa3_ctx->disconnect_lock);
@ -661,8 +660,7 @@ int ipa3_disconnect(u32 clnt_hdl)
spin_lock(&ipa3_ctx->disconnect_lock);
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
spin_unlock(&ipa3_ctx->disconnect_lock);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@ -687,8 +685,7 @@ int ipa3_reset_endpoint(u32 clnt_hdl)
return -EFAULT;
}
ep = &ipa3_ctx->ep[clnt_hdl];
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
res = sps_disconnect(ep->ep_hdl);
if (res) {
IPAERR("sps_disconnect() failed, res=%d.\n", res);
@ -703,8 +700,7 @@ int ipa3_reset_endpoint(u32 clnt_hdl)
}
bail:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
@ -849,8 +845,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
int aggr_active_bitmap = 0;
IPADBG("Applying reset channel with open aggregation frame WA\n");
ipa_write_reg(ipa3_ctx->mmio, IPA_AGGR_FORCE_CLOSE_OFST,
(1 << clnt_hdl));
ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
/* Reset channel */
gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
@ -899,8 +894,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
/* Wait for aggregation frame to be closed and stop channel*/
for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) {
aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
IPA_STATE_AGGR_ACTIVE_OFST);
aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (!(aggr_active_bitmap & (1 << clnt_hdl)))
break;
msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
@ -928,6 +922,12 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
goto start_chan_fail;
}
/*
* Need to sleep for 1ms as required by H/W verified
* sequence for resetting GSI channel
*/
msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
/* Restore channels properties */
result = ipa3_restore_channel_properties(ep, &orig_chan_props,
&orig_chan_scratch);
@ -957,7 +957,7 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
enum gsi_status gsi_res;
int aggr_active_bitmap = 0;
IPADBG("ipa3_reset_gsi_channel: entry\n");
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@ -967,15 +967,13 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/*
* Check for open aggregation frame on Consumer EP -
* reset with open aggregation frame WA
*/
if (IPA_CLIENT_IS_CONS(ep->client)) {
aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
IPA_STATE_AGGR_ACTIVE_OFST);
aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << clnt_hdl)) {
result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl,
ep);
@ -985,7 +983,11 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
}
}
/* Reset channel */
/*
* Reset channel
* If the reset called after stop, need to wait 1ms
*/
msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error resetting channel: %d\n", gsi_res);
@ -995,14 +997,14 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
finish_reset:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("ipa3_reset_gsi_channel: exit\n");
IPADBG("exit\n");
return 0;
reset_chan_fail:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@ -1012,7 +1014,7 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
IPADBG("ipa3_reset_gsi_event_ring: entry\n");
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@ -1022,8 +1024,7 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Reset event ring */
gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@ -1033,14 +1034,14 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("ipa3_reset_gsi_event_ring: exit\n");
IPADBG("exit\n");
return 0;
reset_evt_fail:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@ -1058,7 +1059,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
int ipa_ep_idx;
int result = -EFAULT;
struct ipa3_ep_context *ep;
struct ipa3_ep_cfg_status ep_status;
struct ipahal_reg_ep_cfg_status ep_status;
unsigned long gsi_dev_hdl;
enum gsi_status gsi_res;
struct ipa_gsi_ep_config gsi_ep_cfg;
@ -1085,7 +1086,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ep->skip_ep_cfg = params->skip_ep_cfg;
ep->valid = 1;
@ -1185,7 +1186,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
IPADBG("exit\n");
@ -1198,7 +1199,7 @@ write_evt_scratch_fail:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ipa_cfg_ep_fail:
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail:
return result;
}
@ -1211,7 +1212,7 @@ int ipa3_set_usb_max_packet_size(
IPADBG("entry\n");
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
dev_scratch.mhi_base_chan_idx_valid = false;
@ -1224,8 +1225,7 @@ int ipa3_set_usb_max_packet_size(
IPAERR("Error writing device scratch: %d\n", gsi_res);
return -EFAULT;
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("exit\n");
return 0;
@ -1246,8 +1246,7 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
}
ep = &ipa3_ctx->ep[clnt_hdl];
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
if (xferrscidx_valid) {
ep->chan_scratch.xdci.xferrscidx = xferrscidx;
@ -1264,18 +1263,37 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
goto write_chan_scratch_fail;
}
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("exit\n");
return 0;
write_chan_scratch_fail:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
struct gsi_chan_info *chan_info)
static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
unsigned long chan_hdl)
{
enum gsi_status gsi_res;
memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error querying channel info: %d\n", gsi_res);
return -EFAULT;
}
if (!gsi_chan_info->evt_valid) {
IPAERR("Event info invalid\n");
return -EFAULT;
}
return 0;
}
static bool ipa3_is_xdci_channel_with_given_info_empty(
struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
{
bool is_empty = false;
@ -1303,6 +1321,28 @@ static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
return is_empty;
}
static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
bool *is_empty)
{
struct gsi_chan_info chan_info;
int res;
if (!ep || !is_empty || !ep->valid) {
IPAERR("Input Error\n");
return -EFAULT;
}
res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
if (res) {
IPAERR("Failed to get GSI channel info\n");
return -EFAULT;
}
*is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
return 0;
}
static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
u32 source_pipe_bitmask)
{
@ -1343,73 +1383,161 @@ static int ipa3_disable_force_clear(u32 request_id)
return 0;
}
static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
unsigned long chan_hdl)
/* Clocks should be voted before invoking this function */
static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
{
enum gsi_status gsi_res;
int res;
memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error querying channel info: %d\n", gsi_res);
return -EFAULT;
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 ||
!stop_in_proc) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
if (!gsi_chan_info->evt_valid) {
IPAERR("Event info invalid\n");
res = ipa3_stop_gsi_channel(clnt_hdl);
if (res != 0 && res != -GSI_STATUS_AGAIN &&
res != -GSI_STATUS_TIMED_OUT) {
IPAERR("xDCI stop channel failed res=%d\n", res);
return -EFAULT;
}
*stop_in_proc = res;
IPADBG("xDCI channel is %s (result=%d)\n",
res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
IPADBG("exit\n");
return 0;
}
/* Clocks should be voted for before invoking this function */
static int ipa3_drain_ul_chan_data(struct ipa3_ep_context *ep, u32 qmi_req_id,
u32 source_pipe_bitmask, bool should_force_clear)
/* Clocks should be voted before invoking this function */
static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
bool *stop_in_proc)
{
unsigned long jiffies_start;
unsigned long jiffies_timeout =
msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
int res;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 ||
!stop_in_proc) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
jiffies_start = jiffies;
while (1) {
res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
stop_in_proc);
if (res) {
IPAERR("failed to stop xDCI channel hdl=%d\n",
clnt_hdl);
return res;
}
if (!*stop_in_proc) {
IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
return res;
}
/*
* Give chance to the previous stop request to be accomplished
* before the retry
*/
udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
IPADBG("timeout waiting for xDCI channel emptiness\n");
return res;
}
}
}
/* Clocks should be voted for before invoking this function */
static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
{
int i;
bool is_empty = false;
int result;
struct gsi_chan_info gsi_chan_info;
bool is_empty = false;
int i;
bool stop_in_proc;
struct ipa3_ep_context *ep;
result = ipa3_get_gsi_chan_info(&gsi_chan_info, ep->gsi_chan_hdl);
if (result)
return -EFAULT;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
do {
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
is_empty = ipa3_is_xdci_channel_empty(ep,
&gsi_chan_info);
if (is_empty)
break;
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
ep = &ipa3_ctx->ep[clnt_hdl];
/* first try to stop the channel */
result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
&stop_in_proc);
if (result) {
IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
clnt_hdl, ep->client);
goto exit;
}
if (!stop_in_proc)
goto exit;
/* if stop_in_proc, lets wait for emptiness */
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
result = ipa3_is_xdci_channel_empty(ep, &is_empty);
if (result)
goto exit;
if (is_empty)
break;
if (should_force_clear) {
result = ipa3_enable_force_clear(qmi_req_id, true,
source_pipe_bitmask);
if (result)
return -EFAULT;
}
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
is_empty = ipa3_is_xdci_channel_empty(ep,
&gsi_chan_info);
if (is_empty)
break;
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
if (should_force_clear) {
result = ipa3_disable_force_clear(qmi_req_id);
if (result)
return -EFAULT;
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
/* In case of empty, lets try to stop the channel again */
if (is_empty) {
result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
&stop_in_proc);
if (result) {
IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
clnt_hdl, ep->client);
goto exit;
}
if (!stop_in_proc)
goto exit;
}
/* if still stop_in_proc or not empty, activate force clear */
if (should_force_clear) {
result = ipa3_enable_force_clear(qmi_req_id, true,
source_pipe_bitmask);
if (result)
goto exit;
}
/* with force clear, wait for emptiness */
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
result = ipa3_is_xdci_channel_empty(ep, &is_empty);
if (result)
goto disable_force_clear_and_exit;
if (is_empty)
break;
IPAERR("UL channel is not empty after draining it!\n");
BUG();
} while (0);
return 0;
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
/* try to stop for the last time */
result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
&stop_in_proc);
if (result) {
IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
clnt_hdl, ep->client);
goto disable_force_clear_and_exit;
}
result = stop_in_proc ? -EFAULT : 0;
disable_force_clear_and_exit:
if (should_force_clear)
result = ipa3_disable_force_clear(qmi_req_id);
exit:
return result;
}
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
@ -1418,7 +1546,7 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
int result;
u32 source_pipe_bitmask = 0;
IPADBG("ipa3_xdci_disconnect: entry\n");
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@ -1428,33 +1556,40 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
/* Drain UL channel before stopping it */
if (!IPA_CLIENT_IS_CONS(ep->client)) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ep->client);
result = ipa3_drain_ul_chan_data(ep, qmi_req_id,
source_pipe_bitmask, should_force_clear);
if (result)
IPAERR("Error draining UL channel data: %d\n", result);
IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
clnt_hdl, ep->client);
source_pipe_bitmask = 1 <<
ipa3_get_ep_mapping(ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
source_pipe_bitmask, should_force_clear, clnt_hdl);
if (result) {
IPAERR("Fail to stop UL channel with data drain\n");
WARN_ON(1);
goto stop_chan_fail;
}
} else {
IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
clnt_hdl, ep->client);
result = ipa3_stop_gsi_channel(clnt_hdl);
if (result) {
IPAERR("Error stopping channel (CONS client): %d\n",
result);
goto stop_chan_fail;
}
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
result = ipa3_stop_gsi_channel(clnt_hdl);
if (result) {
IPAERR("Error stopping channel: %d\n", result);
goto stop_chan_fail;
}
ipa3_dec_client_disable_clks();
IPADBG("ipa3_xdci_disconnect: exit\n");
IPADBG("exit\n");
return 0;
stop_chan_fail:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@ -1464,7 +1599,7 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
IPADBG("ipa3_release_gsi_channel: entry\n");
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@ -1474,7 +1609,7 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@ -1492,21 +1627,21 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
ipa3_delete_dflt_flt_rules(clnt_hdl);
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
IPADBG("ipa3_release_gsi_channel: exit\n");
IPADBG("exit\n");
return 0;
dealloc_chan_fail:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
bool should_force_clear, u32 qmi_req_id)
bool should_force_clear, u32 qmi_req_id, bool is_dpl)
{
struct ipa3_ep_context *ul_ep, *dl_ep;
int result = -EFAULT;
@ -1519,105 +1654,112 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
int aggr_active_bitmap = 0;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPADBG("ipa3_xdci_suspend: entry\n");
if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[ul_clnt_hdl].valid == 0 ||
dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[dl_clnt_hdl].valid == 0) {
/* In case of DPL, dl is the DPL channel/client */
IPADBG("entry\n");
if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
ul_ep->gsi_chan_hdl);
if (result)
goto query_chan_info_fail;
if (!is_dpl)
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
dl_ep->gsi_chan_hdl);
if (result)
goto query_chan_info_fail;
goto disable_clk_and_exit;
if (!is_dpl) {
result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
ul_ep->gsi_chan_hdl);
if (result)
goto disable_clk_and_exit;
}
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
if (!dl_data_pending && !ul_data_pending)
break;
is_empty = ipa3_is_xdci_channel_empty(dl_ep,
&dl_gsi_chan_info);
result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
if (result)
goto disable_clk_and_exit;
if (!is_empty) {
dl_data_pending = true;
break;
}
dl_data_pending = false;
is_empty = ipa3_is_xdci_channel_empty(ul_ep,
&ul_gsi_chan_info);
ul_data_pending = is_empty ? false : true;
if (!is_dpl) {
result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
if (result)
goto disable_clk_and_exit;
ul_data_pending = !is_empty;
} else {
ul_data_pending = false;
}
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
if (!dl_data_pending) {
aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
IPA_STATE_AGGR_ACTIVE_OFST);
aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
IPADBG("DL data pending due to open aggr. frame\n");
IPADBG("DL/DPL data pending due to open aggr. frame\n");
dl_data_pending = true;
}
}
if (dl_data_pending) {
IPAERR("DL data pending, can't suspend\n");
IPAERR("DL/DPL data pending, can't suspend\n");
result = -EFAULT;
goto query_chan_info_fail;
goto disable_clk_and_exit;
}
/* Drain UL channel before stopping it */
if (ul_data_pending) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
result = ipa3_drain_ul_chan_data(ul_ep, qmi_req_id,
source_pipe_bitmask, should_force_clear);
if (result)
IPAERR("Error draining UL channel data: %d\n", result);
}
/* Suspend the DL EP */
/* Suspend the DL/DPL EP */
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = true;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
/*
* Check if DL channel is empty again, data could enter the channel
* Check if DL/DPL channel is empty again, data could enter the channel
* before its IPA EP was suspended
*/
is_empty = ipa3_is_xdci_channel_empty(dl_ep, &dl_gsi_chan_info);
result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
if (result)
goto unsuspend_dl_and_exit;
if (!is_empty) {
IPAERR("DL data pending, can't suspend\n");
/* Unsuspend the DL EP */
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
IPAERR("DL/DPL data pending, can't suspend\n");
result = -EFAULT;
goto query_chan_info_fail;
goto unsuspend_dl_and_exit;
}
result = ipa3_stop_gsi_channel(ul_clnt_hdl);
if (result) {
IPAERR("Error stopping UL channel: %d\n", result);
goto query_chan_info_fail;
/* STOP UL channel */
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
goto unsuspend_dl_and_exit;
}
}
if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
IPADBG("ipa3_xdci_suspend: exit\n");
IPADBG("exit\n");
return 0;
query_chan_info_fail:
if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
unsuspend_dl_and_exit:
/* Unsuspend the DL EP */
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
disable_clk_and_exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
return result;
}
@ -1627,7 +1769,7 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
IPADBG("ipa3_start_gsi_channel: entry\n");
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameters.\n");
@ -1637,7 +1779,7 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@ -1646,52 +1788,54 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("ipa3_start_gsi_channel: exit\n");
IPADBG("exit\n");
return 0;
start_chan_fail:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
{
struct ipa3_ep_context *ul_ep, *dl_ep;
enum gsi_status gsi_res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPADBG("ipa3_xdci_resume: entry\n");
if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[ul_clnt_hdl].valid == 0 ||
dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[dl_clnt_hdl].valid == 0) {
/* In case of DPL, dl is the DPL channel/client */
IPADBG("entry\n");
if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
if (!is_dpl)
ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
/* Unsuspend the DL EP */
/* Unsuspend the DL/DPL EP */
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = true;
ep_cfg_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
/* Start UL channel */
gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS)
IPAERR("Error starting UL channel: %d\n", gsi_res);
if (!is_dpl) {
gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS)
IPAERR("Error starting UL channel: %d\n", gsi_res);
}
if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
IPADBG("ipa3_xdci_resume: exit\n");
IPADBG("exit\n");
return 0;
}
/**
@ -1741,7 +1885,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
ep->qmi_request_sent = true;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Set disconnect in progress flag so further flow control events are
* not honored.
*/
@ -1754,7 +1898,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
ep_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -19,9 +19,9 @@
#include "ipa_rm_i.h"
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_CNTR_ON 127265
#define IPA_DBG_CNTR_OFF 127264
#define IPA_DBG_MAX_RULE_IN_TBL 128
#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
* IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
@ -111,44 +111,37 @@ static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
static struct dentry *dfile_rm_stats;
static struct dentry *dfile_status_stats;
static struct dentry *dfile_active_clients;
static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
static s8 ep_reg_idx;
/**
* _ipa_read_gen_reg_v3_0() - Reads and prints IPA general configuration
* registers
*
* Returns the number of characters printed
*/
int _ipa_read_gen_reg_v3_0(char *buff, int max_len)
{
return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_VERSION=0x%x\n"
"IPA_COMP_HW_VERSION=0x%x\n"
"IPA_ROUTE=0x%x\n"
"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
"IPA_SHARED_MEM_SIZE=0x%x\n",
ipa_read_reg(ipa3_ctx->mmio, IPA_VERSION_OFST),
ipa_read_reg(ipa3_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
ipa_read_reg(ipa3_ctx->mmio, IPA_ROUTE_OFST_v3_0),
ipa_read_reg_field(ipa3_ctx->mmio,
IPA_SHARED_MEM_SIZE_OFST_v3_0,
IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v3_0,
IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v3_0),
ipa_read_reg_field(ipa3_ctx->mmio,
IPA_SHARED_MEM_SIZE_OFST_v3_0,
IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v3_0,
IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v3_0));
}
static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int nbytes;
struct ipahal_reg_shared_mem_size smem_sz;
ipa3_inc_client_enable_clks();
nbytes = ipa3_ctx->ctrl->ipa3_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN);
ipa3_dec_client_disable_clks();
memset(&smem_sz, 0, sizeof(smem_sz));
ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_VERSION=0x%x\n"
"IPA_COMP_HW_VERSION=0x%x\n"
"IPA_ROUTE=0x%x\n"
"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
"IPA_SHARED_MEM_SIZE=0x%x\n",
ipahal_read_reg(IPA_VERSION),
ipahal_read_reg(IPA_COMP_HW_VERSION),
ipahal_read_reg(IPA_ROUTE),
smem_sz.shared_mem_baddr,
smem_sz.shared_mem_sz);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@ -247,28 +240,17 @@ int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
"IPA_ENDP_INIT_CFG_%u=0x%x\n",
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_NAT_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_HDR_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_HDR_EXT_n_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_MODE_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_AGGR_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_ROUTE_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_DEAGGR_n_OFST_v3_0(pipe)),
pipe, ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_INIT_CFG_n_OFST(pipe)));
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
}
static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
@ -291,7 +273,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
end_idx = start_idx + 1;
}
pos = *ppos;
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = start_idx; i < end_idx; i++) {
nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
@ -301,7 +283,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
nbytes);
if (ret < 0) {
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return ret;
}
@ -309,7 +291,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
ubuf += nbytes;
count -= nbytes;
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
*ppos = pos + size;
return size;
@ -333,9 +315,9 @@ static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
return -EFAULT;
if (option == 1)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
else if (option == 0)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
else
return -EFAULT;
@ -1250,25 +1232,12 @@ static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
/**
* _ipa_write_dbg_cnt_v3_0() - Configure IPA debug counter register
*
*/
void _ipa_write_dbg_cnt_v3_0(int option)
{
if (option == 1)
ipa_write_reg(ipa3_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(0),
IPA_DBG_CNTR_ON);
else
ipa_write_reg(ipa3_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(0),
IPA_DBG_CNTR_OFF);
}
static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long missing;
u32 option = 0;
struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
if (sizeof(dbg_buff) < count + 1)
return -EFAULT;
@ -1281,36 +1250,36 @@ static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
if (kstrtou32(dbg_buff, 0, &option))
return -EFAULT;
ipa3_inc_client_enable_clks();
ipa3_ctx->ctrl->ipa3_write_dbg_cnt(option);
ipa3_dec_client_disable_clks();
memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
dbg_cnt_ctrl.product = true;
dbg_cnt_ctrl.src_pipe = 0x1f;
dbg_cnt_ctrl.rule_idx_pipe_rule = false;
dbg_cnt_ctrl.rule_idx = 0;
if (option == 1)
dbg_cnt_ctrl.en = true;
else
dbg_cnt_ctrl.en = false;
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return count;
}
/**
* _ipa_write_dbg_cnt_v3_0() - Read IPA debug counter register
*
*/
int _ipa_read_dbg_cnt_v3_0(char *buf, int max_len)
{
int regval;
regval = ipa_read_reg(ipa3_ctx->mmio,
IPA_DEBUG_CNT_REG_N_OFST_v3_0(0));
return scnprintf(buf, max_len,
"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
}
static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int nbytes;
u32 regval;
ipa3_inc_client_enable_clks();
nbytes = ipa3_ctx->ctrl->ipa3_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
regval =
ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@ -1585,6 +1554,49 @@ static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
return 0;
}
static ssize_t ipa3_print_active_clients_log(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
int cnt;
int table_size;
if (active_clients_buf == NULL) {
IPAERR("Active Clients buffer is not allocated");
return 0;
}
memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
ipa3_active_clients_lock();
cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
table_size = ipa3_active_clients_log_print_table(active_clients_buf
+ cnt, IPA_MAX_MSG_LEN);
ipa3_active_clients_unlock();
return simple_read_from_buffer(ubuf, count, ppos,
active_clients_buf, cnt + table_size);
}
static ssize_t ipa3_clear_active_clients_log(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
unsigned long missing;
s8 option = 0;
if (sizeof(dbg_buff) < count + 1)
return -EFAULT;
missing = copy_from_user(dbg_buff, ubuf, count);
if (missing)
return -EFAULT;
dbg_buff[count] = '\0';
if (kstrtos8(dbg_buff, 0, &option))
return -EFAULT;
ipa3_active_clients_log_clear();
return count;
}
const struct file_operations ipa3_gen_reg_ops = {
.read = ipa3_read_gen_reg,
@ -1665,6 +1677,11 @@ const struct file_operations ipa3_rm_stats = {
.read = ipa3_rm_read_stats,
};
const struct file_operations ipa3_active_clients = {
.read = ipa3_print_active_clients_log,
.write = ipa3_clear_active_clients_log,
};
void ipa3_debugfs_init(void)
{
const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
@ -1694,6 +1711,19 @@ void ipa3_debugfs_init(void)
goto fail;
}
dfile_active_clients = debugfs_create_file("active_clients",
read_write_mode, dent, 0, &ipa3_active_clients);
if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
IPAERR("fail to create file for debug_fs active_clients\n");
goto fail;
}
active_clients_buf = NULL;
active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
GFP_KERNEL);
if (active_clients_buf == NULL)
IPAERR("fail to allocate active clients memory buffer");
dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
&ipa3_ep_reg_ops);
if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
@ -1864,6 +1894,13 @@ void ipa3_debugfs_init(void)
goto fail;
}
file = debugfs_create_u32("enable_low_prio_print", read_write_mode,
dent, &ipa3_ctx->enable_low_prio_print);
if (!file) {
IPAERR("could not create enable_low_prio_print file\n");
goto fail;
}
return;
fail:
@ -1876,6 +1913,10 @@ void ipa3_debugfs_remove(void)
IPAERR("ipa3_debugfs_remove: folder was not created.\n");
return;
}
if (active_clients_buf != NULL) {
kfree(active_clients_buf);
active_clients_buf = NULL;
}
debugfs_remove_recursive(dent);
}

View file

@ -259,7 +259,7 @@ int ipa3_dma_enable(void)
mutex_unlock(&ipa3_dma_ctx->enable_lock);
return -EPERM;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
ipa3_dma_ctx->is_enabled = true;
mutex_unlock(&ipa3_dma_ctx->enable_lock);
@ -322,7 +322,7 @@ int ipa3_dma_disable(void)
}
ipa3_dma_ctx->is_enabled = false;
spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
mutex_unlock(&ipa3_dma_ctx->enable_lock);
IPADMA_FUNC_EXIT();
return 0;

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -11,6 +11,7 @@
*/
#include "ipa_i.h"
#include "ipahal/ipahal.h"
#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
@ -246,12 +247,12 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
}
}
IPADBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
IPADBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
en_rule,
hdr->u.hdr.action,
hdr->u.hdr.rt_tbl_idx,
hdr->u.hdr.retain_hdr);
IPADBG("priority=%d, rule_id=%d\n",
IPADBG_LOW("priority=%d, rule_id=%d\n",
hdr->u.hdr.priority,
hdr->u.hdr.rule_id);
@ -274,7 +275,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
struct ipa3_flt_tbl *tbl;
int i;
IPADBG("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i))
@ -282,7 +283,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
tbl = &ipa3_ctx->flt_tbl[i][ip];
if (tbl->prev_mem[rlt].phys_base) {
IPADBG("reaping flt tbl (prev) pipe=%d\n", i);
IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
dma_free_coherent(ipa3_ctx->pdev,
tbl->prev_mem[rlt].size,
tbl->prev_mem[rlt].base,
@ -293,7 +294,8 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
if (list_empty(&tbl->head_flt_rule_list)) {
if (tbl->curr_mem[rlt].phys_base) {
IPADBG("reaping flt tbl (curr) pipe=%d\n", i);
IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
i);
dma_free_coherent(ipa3_ctx->pdev,
tbl->curr_mem[rlt].size,
tbl->curr_mem[rlt].base,
@ -391,7 +393,7 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
IPAERR("failed to calculate HW FLT rule size\n");
return -EPERM;
}
IPADBG("pipe %d hw_len %d priority %u\n",
IPADBG_LOW("pipe %d hw_len %d priority %u\n",
pipe_idx, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@ -402,7 +404,8 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
if ((tbl->sz[IPA_RULE_HASHABLE] +
tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
IPADBG("flt tbl pipe %d is with zero total size\n", pipe_idx);
IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
pipe_idx);
return 0;
}
@ -412,7 +415,7 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
if (tbl->sz[IPA_RULE_NON_HASHABLE])
tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
IPADBG("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
return 0;
@ -648,7 +651,7 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
}
ipa_get_flt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
IPADBG("total flt tbl local body sizes: hash %u nhash %u\n",
IPADBG_LOW("total flt tbl local body sizes: hash %u nhash %u\n",
hash_bdy_sz, nhash_bdy_sz);
hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
@ -744,32 +747,31 @@ static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
/**
* ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
* buffers for headers and bodies updates via imm cmds
* also allocate descriptor for the flushing imm cmd
* payload pointers buffers for headers and bodies of flt structure
* as well as place for flush imm.
* @ipt: the ip address family type
* @desc: [OUT] descriptor buffer
* @cmd: [OUT] imm commands buffer
* @cmd: [OUT] imm commands payload pointers buffer
*
* Return: 0 on success, negative on failure
*/
static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
struct ipa3_desc **desc, struct ipa3_hw_imm_cmd_dma_shared_mem **cmd)
struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
{
u16 entries;
/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
if (*desc == NULL) {
IPAERR("fail to alloc desc blob ip %d\n", ip);
goto fail_desc_alloc;
}
/* +2: for bodies (hashable and non-hashable) */
entries = (ipa3_ctx->ep_flt_num) * 2 + 2;
*cmd = kcalloc(entries, sizeof(**cmd), GFP_ATOMIC);
if (*cmd == NULL) {
IPAERR("fail to alloc cmd blob ip %d\n", ip);
*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
if (*cmd_pyld == NULL) {
IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
goto fail_cmd_alloc;
}
@ -791,18 +793,18 @@ fail_desc_alloc:
static bool ipa_flt_skip_pipe_config(int pipe)
{
if (ipa_is_modem_pipe(pipe)) {
IPADBG("skip %d - modem owned pipe\n", pipe);
IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
return true;
}
if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
IPADBG("skip %d\n", pipe);
IPADBG_LOW("skip %d\n", pipe);
return true;
}
if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
IPADBG("skip %d\n", pipe);
IPADBG_LOW("skip %d\n", pipe);
return true;
}
@ -824,14 +826,17 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
struct ipa3_mem_buffer hash_hdr, nhash_hdr;
int rc = 0;
struct ipa3_desc *desc;
struct ipa3_hw_imm_cmd_dma_shared_mem *mem_cmd;
struct ipa3_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
struct ipahal_imm_cmd_pyld **cmd_pyld;
int num_cmd = 0;
int i;
int hdr_idx;
u32 lcl_hash_hdr, lcl_nhash_hdr;
u32 lcl_hash_bdy, lcl_nhash_bdy;
bool lcl_hash, lcl_nhash;
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
if (ip == IPA_IP_v4) {
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
@ -878,29 +883,40 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
goto fail_size_valid;
}
if (ipa_flt_alloc_cmd_buffers(ip, &desc, &mem_cmd)) {
if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
rc = -ENOMEM;
goto fail_size_valid;
}
/* flushing ipa internal hashable flt rules cache */
reg_write_cmd.skip_pipeline_clear = 0;
reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_cmd.offset = IPA_FILT_ROUT_HASH_FLUSH_OFST;
reg_write_cmd.value = (ip == IPA_IP_v4) ?
(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT) :
(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
reg_write_cmd.value_mask = reg_write_cmd.value;
desc[0].opcode = IPA_REGISTER_WRITE;
desc[0].pyld = &reg_write_cmd;
desc[0].len = sizeof(reg_write_cmd);
memset(&flush, 0, sizeof(flush));
if (ip == IPA_IP_v4)
flush.v4_flt = true;
else
flush.v6_flt = true;
ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
reg_write_cmd.skip_pipeline_clear = false;
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
reg_write_cmd.value = valmask.val;
reg_write_cmd.value_mask = valmask.mask;
cmd_pyld[0] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
if (!cmd_pyld[0]) {
IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
rc = -EFAULT;
goto fail_reg_write_construct;
}
desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].pyld = cmd_pyld[0]->data;
desc[0].len = cmd_pyld[0]->len;
desc[0].type = IPA_IMM_CMD_DESC;
num_cmd++;
hdr_idx = 0;
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i)) {
IPADBG("skip %d - not filtering pipe\n", i);
IPADBG_LOW("skip %d - not filtering pipe\n", i);
continue;
}
@ -909,83 +925,115 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
continue;
}
IPADBG("Prepare imm cmd for hdr at index %d for pipe %d\n",
IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
hdr_idx, i);
mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd-1].size = IPA_HW_TBL_HDR_WIDTH;
mem_cmd[num_cmd-1].system_addr = nhash_hdr.phys_base +
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
mem_cmd.system_addr = nhash_hdr.phys_base +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
mem_cmd[num_cmd-1].local_addr = lcl_nhash_hdr +
mem_cmd.local_addr = lcl_nhash_hdr +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd-1].size = IPA_HW_TBL_HDR_WIDTH;
mem_cmd[num_cmd-1].system_addr = hash_hdr.phys_base +
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
mem_cmd.system_addr = hash_hdr.phys_base +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
mem_cmd[num_cmd-1].local_addr = lcl_hash_hdr +
mem_cmd.local_addr = lcl_hash_hdr +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
hdr_idx++;
}
if (lcl_nhash) {
mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd-1].size = nhash_bdy.size;
mem_cmd[num_cmd-1].system_addr = nhash_bdy.phys_base;
mem_cmd[num_cmd-1].local_addr = lcl_nhash_bdy;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = nhash_bdy.size;
mem_cmd.system_addr = nhash_bdy.phys_base;
mem_cmd.local_addr = lcl_nhash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
}
if (lcl_hash) {
mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd-1].size = hash_bdy.size;
mem_cmd[num_cmd-1].system_addr = hash_bdy.phys_base;
mem_cmd[num_cmd-1].local_addr = lcl_hash_bdy;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = hash_bdy.size;
mem_cmd.system_addr = hash_bdy.phys_base;
mem_cmd.local_addr = lcl_hash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
}
if (ipa3_send_cmd(num_cmd, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
goto fail_send_cmd;
goto fail_imm_cmd_construct;
}
IPADBG("Hashable HEAD\n");
IPADBG_LOW("Hashable HEAD\n");
IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
IPADBG("Non-Hashable HEAD\n");
IPADBG_LOW("Non-Hashable HEAD\n");
IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
if (hash_bdy.size) {
IPADBG("Hashable BODY\n");
IPADBG_LOW("Hashable BODY\n");
IPA_DUMP_BUFF(hash_bdy.base,
hash_bdy.phys_base, hash_bdy.size);
}
if (nhash_bdy.size) {
IPADBG("Non-Hashable BODY\n");
IPADBG_LOW("Non-Hashable BODY\n");
IPA_DUMP_BUFF(nhash_bdy.base,
nhash_bdy.phys_base, nhash_bdy.size);
}
@ -993,9 +1041,12 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
fail_send_cmd:
fail_imm_cmd_construct:
for (i = 0 ; i < num_cmd ; i++)
ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_reg_write_construct:
kfree(desc);
kfree(mem_cmd);
kfree(cmd_pyld);
fail_size_valid:
dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
hash_hdr.base, hash_hdr.phys_base);
@ -1112,7 +1163,7 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
}
*rule_hdl = id;
entry->id = id;
IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
}
@ -1320,7 +1371,7 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
return -EINVAL;
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
}
@ -1693,48 +1744,6 @@ void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
mutex_unlock(&ipa3_ctx->lock);
}
static u32 ipa3_build_flt_tuple_mask(struct ipa3_hash_tuple *tpl)
{
u32 msk = 0;
IPA_SETFIELD_IN_REG(msk, tpl->src_id,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->src_ip_addr,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->dst_ip_addr,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->src_port,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->dst_port,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->protocol,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->meta_data,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK
);
return msk;
}
/**
* ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
* Pipe must be for AP EP (not modem) and support filtering
@ -1745,10 +1754,9 @@ static u32 ipa3_build_flt_tuple_mask(struct ipa3_hash_tuple *tpl)
* Returns: 0 on success, negative on failure
*
*/
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple)
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
{
u32 val;
u32 mask;
struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
if (!tuple) {
IPAERR("bad tuple\n");
@ -1770,19 +1778,11 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple)
return -EINVAL;
}
val = ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(pipe_idx));
val &= 0xFFFF0000; /* clear 16 LSBs - flt bits */
mask = ipa3_build_flt_tuple_mask(tuple);
mask &= 0x0000FFFF;
val |= mask;
ipa_write_reg(ipa3_ctx->mmio,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(pipe_idx),
val);
ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
pipe_idx, &fltrt_tuple);
fltrt_tuple.flt = *tuple;
ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
pipe_idx, &fltrt_tuple);
return 0;
}
@ -1866,7 +1866,8 @@ int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
tbl_entry_in_hdr = ipa3_ctx->mmio +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) + tbl_entry_in_hdr_ofst;
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
tbl_entry_in_hdr_ofst;
/* for tables resides in DDR access it from the virtual memory */
if (*tbl_entry_in_hdr & 0x1) {

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -11,6 +11,7 @@
*/
#include "ipa_i.h"
#include "ipahal/ipahal.h"
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
@ -43,7 +44,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
IPAERR("hdr tbl empty\n");
return -EPERM;
}
IPADBG("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
&mem->phys_base, GFP_KERNEL);
@ -57,7 +58,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
link) {
if (entry->is_hdr_proc_ctx)
continue;
IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
entry->offset_entry->offset);
memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
entry->hdr_len);
@ -74,7 +75,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
list_for_each_entry(entry,
&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
link) {
IPADBG("processing type %d ofst=%d\n",
IPADBG_LOW("processing type %d ofst=%d\n",
entry->type, entry->offset_entry->offset);
if (entry->type == IPA_HDR_PROC_NONE) {
struct ipa3_hdr_proc_ctx_add_hdr_seq *ctx;
@ -88,7 +89,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
entry->hdr->phys_base :
hdr_base_addr +
entry->hdr->offset_entry->offset;
IPADBG("header address 0x%x\n",
IPADBG_LOW("header address 0x%x\n",
ctx->hdr_add.hdr_addr);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
@ -105,7 +106,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
entry->hdr->phys_base :
hdr_base_addr +
entry->hdr->offset_entry->offset;
IPADBG("header address 0x%x\n",
IPADBG_LOW("header address 0x%x\n",
ctx->hdr_add.hdr_addr);
ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
ctx->cmd.length = 0;
@ -117,7 +118,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3)
ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
IPADBG("command id %d\n", ctx->cmd.value);
IPADBG_LOW("command id %d\n", ctx->cmd.value);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
ctx->end.value = 0;
@ -144,7 +145,7 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
/* make sure table is aligned */
mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
IPADBG("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
&mem->phys_base, GFP_KERNEL);
@ -177,10 +178,12 @@ int __ipa_commit_hdr_v3_0(void)
struct ipa3_mem_buffer hdr_mem;
struct ipa3_mem_buffer ctx_mem;
struct ipa3_mem_buffer aligned_ctx_mem;
struct ipa3_hdr_init_system hdr_init_cmd = {0};
struct ipa3_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
struct ipa3_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
struct ipa3_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@ -205,17 +208,25 @@ int __ipa_commit_hdr_v3_0(void)
IPA_MEM_PART(apps_hdr_size));
goto end;
} else {
dma_cmd_hdr.skip_pipeline_clear = 0;
dma_cmd_hdr.pipeline_clear_options = IPA_HPS_CLEAR;
dma_cmd_hdr.is_read = false; /* write operation */
dma_cmd_hdr.skip_pipeline_clear = false;
dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_hdr.system_addr = hdr_mem.phys_base;
dma_cmd_hdr.size = hdr_mem.size;
dma_cmd_hdr.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
desc[0].opcode = IPA_DMA_SHARED_MEM;
desc[0].pyld = &dma_cmd_hdr;
desc[0].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
hdr_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM,
&dma_cmd_hdr, false);
if (!hdr_cmd_pyld) {
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
desc[0].opcode = ipahal_imm_cmd_get_opcode(
IPA_IMM_CMD_DMA_SHARED_MEM);
desc[0].pyld = hdr_cmd_pyld->data;
desc[0].len = hdr_cmd_pyld->len;
}
} else {
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
@ -224,9 +235,17 @@ int __ipa_commit_hdr_v3_0(void)
goto end;
} else {
hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
desc[0].opcode = IPA_HDR_INIT_SYSTEM;
desc[0].pyld = &hdr_init_cmd;
desc[0].len = sizeof(struct ipa3_hdr_init_system);
hdr_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_HDR_INIT_SYSTEM,
&hdr_init_cmd, false);
if (!hdr_cmd_pyld) {
IPAERR("fail construct hdr_init_system cmd\n");
goto end;
}
desc[0].opcode = ipahal_imm_cmd_get_opcode(
IPA_IMM_CMD_HDR_INIT_SYSTEM);
desc[0].pyld = hdr_cmd_pyld->data;
desc[0].len = hdr_cmd_pyld->len;
}
}
desc[0].type = IPA_IMM_CMD_DESC;
@ -241,17 +260,25 @@ int __ipa_commit_hdr_v3_0(void)
proc_ctx_size);
goto end;
} else {
dma_cmd_ctx.skip_pipeline_clear = 0;
dma_cmd_ctx.pipeline_clear_options = IPA_HPS_CLEAR;
dma_cmd_ctx.is_read = false; /* Write operation */
dma_cmd_ctx.skip_pipeline_clear = false;
dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
dma_cmd_ctx.size = aligned_ctx_mem.size;
dma_cmd_ctx.local_addr =
ipa3_ctx->smem_restricted_bytes +
proc_ctx_ofst;
desc[1].opcode = IPA_DMA_SHARED_MEM;
desc[1].pyld = &dma_cmd_ctx;
desc[1].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
ctx_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM,
&dma_cmd_ctx, false);
if (!ctx_cmd_pyld) {
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
desc[1].opcode = ipahal_imm_cmd_get_opcode(
IPA_IMM_CMD_DMA_SHARED_MEM);
desc[1].pyld = ctx_cmd_pyld->data;
desc[1].len = ctx_cmd_pyld->len;
}
} else {
proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
@ -261,15 +288,26 @@ int __ipa_commit_hdr_v3_0(void)
proc_ctx_size_ddr);
goto end;
} else {
reg_write_cmd.skip_pipeline_clear = 0;
reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
reg_write_cmd.skip_pipeline_clear = false;
reg_write_cmd.pipeline_clear_options =
IPAHAL_HPS_CLEAR;
reg_write_cmd.offset =
ipahal_get_reg_ofst(
IPA_SYS_PKT_PROC_CNTXT_BASE);
reg_write_cmd.value = aligned_ctx_mem.phys_base;
reg_write_cmd.value_mask =
~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
desc[1].pyld = &reg_write_cmd;
desc[1].opcode = IPA_REGISTER_WRITE;
desc[1].len = sizeof(reg_write_cmd);
ctx_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE,
&reg_write_cmd, false);
if (!ctx_cmd_pyld) {
IPAERR("fail construct register_write cmd\n");
goto end;
}
desc[1].opcode = ipahal_imm_cmd_get_opcode(
IPA_IMM_CMD_REGISTER_WRITE);
desc[1].pyld = ctx_cmd_pyld->data;
desc[1].len = ctx_cmd_pyld->len;
}
}
desc[1].type = IPA_IMM_CMD_DESC;
@ -309,6 +347,12 @@ int __ipa_commit_hdr_v3_0(void)
}
end:
if (ctx_cmd_pyld)
ipahal_destroy_imm_cmd(ctx_cmd_pyld);
if (hdr_cmd_pyld)
ipahal_destroy_imm_cmd(hdr_cmd_pyld);
return rc;
}
@ -323,7 +367,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
int id;
int needed_len;
IPADBG("processing type %d hdr_hdl %d\n",
IPADBG_LOW("processing type %d hdr_hdl %d\n",
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
@ -394,7 +438,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->offset_entry = offset;
list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
htbl->proc_ctx_cnt++;
IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
htbl->proc_ctx_cnt, offset->offset);
id = ipa3_id_alloc(entry);
@ -510,12 +554,12 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_add(&entry->link, &htbl->head_hdr_entry_list);
htbl->hdr_cnt++;
if (entry->is_hdr_proc_ctx)
IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
hdr->hdr_len,
htbl->hdr_cnt,
&entry->phys_base);
else
IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
hdr->hdr_len,
htbl->hdr_cnt,
entry->offset_entry->offset);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -16,25 +16,6 @@
/* This header defines various HW related data types */
/* immediate command op-codes */
#define IPA_DECIPH_INIT (1)
#define IPA_PPP_FRM_INIT (2)
#define IPA_IP_V4_FILTER_INIT (3)
#define IPA_IP_V6_FILTER_INIT (4)
#define IPA_IP_V4_NAT_INIT (5)
#define IPA_IP_V6_NAT_INIT (6)
#define IPA_IP_V4_ROUTING_INIT (7)
#define IPA_IP_V6_ROUTING_INIT (8)
#define IPA_HDR_INIT_LOCAL (9)
#define IPA_HDR_INIT_SYSTEM (10)
#define IPA_DECIPH_SETUP (11)
#define IPA_REGISTER_WRITE (12)
#define IPA_NAT_DMA (14)
#define IPA_IP_PACKET_INIT (16)
#define IPA_DMA_SHARED_MEM (19)
#define IPA_IP_PACKET_TAG_STATUS (20)
#define IPA_DMA_TASK_32B_ADDR(num_buff) (17 + ((num_buff) << 8))
/* Processing context TLV type */
#define IPA_PROC_CTX_TLV_TYPE_END 0
#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
@ -113,111 +94,6 @@ struct ipa3_rt_rule_hw_hdr {
} u;
};
/**
* struct ipa3_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
* @hash_rules_addr: System memory address of IPv4 hashable rules
* @hash_rules_size: Size in bytes of the hashable rules
* @hash_local_addr: Shared memory address of IPv4 hashable rules
* @nhash_rules_size: Size in bytes of the non-hashable rules
* @nhash_local_addr: Shared memory address of IPv4 non-hashable rules
* @rsvd: reserved
* @nhash_rules_addr: System memory address of IPv4 non-hashable rules
*/
struct ipa3_ip_v4_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/**
* struct ipa3_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
* @hash_rules_addr: System memory address of IPv6 hashable rules
* @hash_rules_size: Size in bytes of the hashable rules
* @hash_local_addr: Shared memory address of IPv6 hashable rules
* @nhash_rules_size: Size in bytes of the non-hashable rules
* @nhash_local_addr: Shared memory address of IPv6 non-hashable rules
* @rsvd: reserved
* @nhash_rules_addr: System memory address of IPv6 non-hashable rules
*/
struct ipa3_ip_v6_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/**
* struct ipa3_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
* @hash_rules_addr: System memory address of IPv4 hashable rules
* @hash_rules_size: Size in bytes of the hashable rules
* @hash_local_addr: Shared memory address of IPv4 hashable rules
* @nhash_rules_size: Size in bytes of the non-hashable rules
* @nhash_local_addr: Shared memory address of IPv4 non-hashable rules
* @rsvd: reserved
* @nhash_rules_addr: System memory address of IPv4 non-hashable rules
*/
struct ipa3_ip_v4_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/**
* struct ipa3_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
* @hash_rules_addr: System memory address of IPv6 hashable rules
* @hash_rules_size: Size in bytes of the hashable rules
* @hash_local_addr: Shared memory address of IPv6 hashable rules
* @nhash_rules_size: Size in bytes of the non-hashable rules
* @nhash_local_addr: Shared memory address of IPv6 non-hashable rules
* @rsvd: reserved
* @nhash_rules_addr: System memory address of IPv6 non-hashable rules
*/
struct ipa3_ip_v6_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/**
* struct ipa3_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
* @hdr_table_src_addr: word address of header table in system memory where the
* table starts (use as source for memory copying)
* @size_hdr_table: size of the above (in bytes)
* @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy)
* @rsvd: reserved
*/
struct ipa3_hdr_init_local {
u64 hdr_table_src_addr:64;
u64 size_hdr_table:12;
u64 hdr_table_dst_addr:16;
u64 rsvd:4;
};
/**
* struct ipa3_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
* @hdr_table_addr: word address of header table in system memory where the
* table starts (use as source for memory copying)
* @rsvd: reserved
*/
struct ipa3_hdr_init_system {
u64 hdr_table_addr:64;
};
/**
* struct ipa3_hdr_proc_ctx_tlv -
* HW structure of IPA processing context header - TLV part
@ -276,124 +152,6 @@ struct ipa3_a5_mux_hdr {
u32 metadata;
};
/**
* enum ipa_pipeline_clear_option - Values for pipeline_clear_options
* @IPA_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
* shall not be serviced until HPS is clear of packets or immediate commands.
* The high priority Rx queue / Q6ZIP group shall still be serviced normally.
*
* @IPA_SRC_GRP_CLEAR: Wait for originating source group to be clear
* (for no packet contexts allocated to the originating source group).
* The source group / Rx queue shall not be serviced until all previously
* allocated packet contexts are released. All other source groups/queues shall
* be serviced normally.
*
* @IPA_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
* All groups / Rx queues shall not be serviced until IPA pipeline is fully
* clear. This should be used for debug only.
*/
enum ipa_pipeline_clear_option {
IPA_HPS_CLEAR,
IPA_SRC_GRP_CLEAR,
IPA_FULL_PIPELINE_CLEAR
};
/**
* struct ipa3_register_write - IPA_REGISTER_WRITE command payload
* @rsvd: reserved
* @skip_pipeline_clear: 0 to wait until IPA pipeline is clear
* @offset: offset from IPA base address
* @value: value to write to register
* @value_mask: mask specifying which value bits to write to the register
* @pipeline_clear_options: options for pipeline to clear
*/
struct ipa3_register_write {
u64 rsvd:15;
u64 skip_pipeline_clear:1;
u64 offset:16;
u64 value:32;
u64 value_mask:32;
u64 pipeline_clear_options:2;
u64 rsvd2:30;
};
/**
* struct ipa3_nat_dma - IPA_NAT_DMA command payload
* @table_index: NAT table index
* @rsvd1: reserved
* @base_addr: base address
* @rsvd2: reserved
* @offset: offset
* @data: metadata
* @rsvd3: reserved
*/
struct ipa3_nat_dma {
u64 table_index:3;
u64 rsvd1:1;
u64 base_addr:2;
u64 rsvd2:2;
u64 offset:32;
u64 data:16;
u64 rsvd3:8;
};
/**
* struct ipa3_ip_packet_init - IPA_IP_PACKET_INIT command payload
* @destination_pipe_index: destination pipe index
* @rsvd1: reserved
* @metadata: metadata
* @rsvd2: reserved
*/
struct ipa3_ip_packet_init {
u64 destination_pipe_index:5;
u64 rsvd1:3;
u64 rsvd2:32;
u64 rsvd3:24;
};
/**
* struct ipa3_nat_dma - IPA_IP_V4_NAT_INIT command payload
* @ipv4_rules_addr: ipv4 rules address
* @ipv4_expansion_rules_addr: ipv4 expansion rules address
* @index_table_addr: index tables address
* @index_table_expansion_addr: index expansion table address
* @table_index: index in table
* @ipv4_rules_addr_type: ipv4 address type
* @ipv4_expansion_rules_addr_type: ipv4 expansion address type
* @index_table_addr_type: index table address type
* @index_table_expansion_addr_type: index expansion table type
* @size_base_tables: size of base tables
* @size_expansion_tables: size of expansion tables
* @rsvd2: reserved
* @public_ip_addr: public IP address
*/
struct ipa3_ip_v4_nat_init {
u64 ipv4_rules_addr:64;
u64 ipv4_expansion_rules_addr:64;
u64 index_table_addr:64;
u64 index_table_expansion_addr:64;
u64 table_index:3;
u64 rsvd1:1;
u64 ipv4_rules_addr_type:1;
u64 ipv4_expansion_rules_addr_type:1;
u64 index_table_addr_type:1;
u64 index_table_expansion_addr_type:1;
u64 size_base_tables:12;
u64 size_expansion_tables:10;
u64 rsvd2:2;
u64 public_ip_addr:32;
};
/**
* struct ipa3_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload
* @rsvd: reserved
* @tag: tag value returned within status
*/
struct ipa3_ip_packet_tag_status {
u64 rsvd:16;
u64 tag:48;
};
/*! @brief Struct for the IPAv3.0 UL packet status header */
struct ipa3_hw_pkt_status {
u64 status_opcode:8;
@ -474,29 +232,4 @@ enum ipa3_hw_pkt_status_exception {
IPA_HW_PKT_STATUS_EXCEPTION_MAX = 0xFF
};
/*! @brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */
struct ipa3_hw_imm_cmd_dma_shared_mem {
u64 reserved_1:16;
u64 size:16;
u64 local_addr:16;
u64 direction:1;
u64 skip_pipeline_clear:1;
u64 pipeline_clear_options:2;
u64 reserved_2:12;
u64 system_addr:64;
};
/*! @brief IPA_HW_IMM_CMD_DMA_TASK_32B_ADDR Immediate Command Parameters */
struct ipa3_hw_imm_cmd_dma_task_32b_addr {
u64 reserved:11;
u64 cmplt:1;
u64 eof:1;
u64 flsh:1;
u64 lock:1;
u64 unlock:1;
u64 size1:16;
u64 addr1:32;
u64 packet_size:16;
};
#endif /* _IPA_HW_DEFS_H */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -22,16 +22,18 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/ipa.h>
#include <linux/ipa_usb.h>
#include <linux/msm-sps.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/ipc_logging.h>
#include <linux/firmware.h>
#include "ipa_hw_defs.h"
#include "ipa_ram_mmap.h"
#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
#include "ipahal/ipahal_reg.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@ -48,11 +50,38 @@
#define IPA_GENERIC_RX_POOL_SZ 192
#define IPA_MAX_STATUS_STAT_NUM 30
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define IPA_IPC_LOGGING(buf, fmt, args...) \
ipc_log_string((buf), \
DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPADBG(fmt, args...) \
pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
do { \
pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
if (ipa3_ctx) { \
IPA_IPC_LOGGING(ipa3_ctx->logbuf, fmt, ## args); \
IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
} \
} while (0)
#define IPADBG_LOW(fmt, args...) \
do { \
pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
if (ipa3_ctx && ipa3_ctx->enable_low_prio_print) \
IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
} while (0)
#define IPAERR(fmt, args...) \
pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
do { \
pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
if (ipa3_ctx) { \
IPA_IPC_LOGGING(ipa3_ctx->logbuf, fmt, ## args); \
IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
} \
} while (0)
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
@ -128,9 +157,6 @@
#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
(reg |= ((val) << (shift)) & (mask))
#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
(((start_ofst) + 127) & ~127)
@ -184,6 +210,123 @@
#define IPA_SLEEP_CLK_RATE_KHZ (32)
/*
 * Helpers that fill a struct ipa3_active_client_logging_info record with
 * the call site (file/line), the entry type and an identifying string.
 * NOTE(review): these are multi-statement macros without a do/while(0)
 * wrapper - only safe when expanded inside the INC/DEC wrappers below,
 * which place them in their own do/while block; confirm no bare use.
 */
/* Record an endpoint (EP) vote; id taken from ipa3_clients_strings[]. */
#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = EP; \
log_info.id_string = ipa3_clients_strings[client]
/* Record a SIMPLE vote; id is the calling function's name. */
#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = SIMPLE; \
log_info.id_string = __func__
/* Record a RESOURCE vote; id is the caller-supplied resource name. */
#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = RESOURCE; \
log_info.id_string = resource_name
/* Record a SPECIAL vote; id is the caller-supplied string. */
#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = SPECIAL; \
log_info.id_string = id_str
/*
 * Active-clients vote wrappers: each builds a logging record describing the
 * call site via the matching IPA_ACTIVE_CLIENTS_PREP_* helper, then takes
 * (INC -> ipa3_inc_client_enable_clks) or releases
 * (DEC -> ipa3_dec_client_disable_clks) an IPA clock vote with that record.
 */
/* Vote/unvote on behalf of an IPA endpoint client. */
#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
ipa3_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
ipa3_dec_client_disable_clks(&log_info); \
} while (0)
/* Vote/unvote identified only by the calling function's name. */
#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
ipa3_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
ipa3_dec_client_disable_clks(&log_info); \
} while (0)
/* Vote/unvote on behalf of a named resource. */
#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
ipa3_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
ipa3_dec_client_disable_clks(&log_info); \
} while (0)
/* Vote/unvote with an arbitrary caller-supplied id string. */
#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
ipa3_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
do { \
struct ipa3_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
ipa3_dec_client_disable_clks(&log_info); \
} while (0)
/*
 * Sizing for the active-clients log (see ipa3_active_clients_log_ctx):
 * number of lines in the circular buffer, max length of one line, number
 * of hash-table buckets, and max length of a stored id string.
 */
#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
extern const char *ipa3_clients_strings[];
/*
 * enum ipa3_active_client_log_type - kind of entity a clock-vote log entry
 * refers to: an endpoint (EP), a plain call site (SIMPLE), a named RESOURCE,
 * or a caller-tagged SPECIAL id. INVALID marks an unset/unrecognized entry.
 */
enum ipa3_active_client_log_type {
EP,
SIMPLE,
RESOURCE,
SPECIAL,
INVALID
};
/*
 * struct ipa3_active_client_logging_info - describes one clock vote for the
 * active-clients log.
 * @id_string: identifying string (client name, function name or resource)
 * @file: source file of the call site (set to __FILENAME__ by the PREP macros)
 * @line: source line of the call site
 * @type: which kind of entity took the vote
 */
struct ipa3_active_client_logging_info {
const char *id_string;
char *file;
int line;
enum ipa3_active_client_log_type type;
};
/*
 * struct ipa3_active_client_htable_entry - per-id bookkeeping node in the
 * active-clients hash table.
 * @list: hash-bucket linkage
 * @id_string: copy of the vote id (bounded by IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN)
 * @count: running vote count for this id (presumably inc/dec balance -
 *         NOTE(review): confirm against the table-update code)
 * @type: kind of entity this entry tracks
 */
struct ipa3_active_client_htable_entry {
struct hlist_node list;
char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
int count;
enum ipa3_active_client_log_type type;
};
/*
 * struct ipa3_active_clients_log_ctx - state of the active-clients logger.
 * @log_buffer: circular buffer of formatted log lines
 * @log_head: index of the newest entry
 * @log_tail: index of the oldest entry
 * @log_rdy: true once the logger has been initialized and may be used
 * @htable: hash table of per-id vote counters
 */
struct ipa3_active_clients_log_ctx {
char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
int log_head;
int log_tail;
bool log_rdy;
struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
};
struct ipa3_client_names {
enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
int length;
@ -467,28 +610,6 @@ struct ipa3_rt_tbl_set {
u32 tbl_cnt;
};
/**
* struct ipa3_ep_cfg_status - status configuration in IPA end-point
* @status_en: Determines if end point supports Status Indications. SW should
* set this bit in order to enable Statuses. Output Pipe - send
* Status indications only if bit is set. Input Pipe - forward Status
* indication to STATUS_ENDP only if bit is set. Valid for Input
* and Output Pipes (IPA Consumer and Producer)
* @status_ep: Statuses generated for this endpoint will be forwarded to the
* specified Status End Point. Status endpoint needs to be
* configured with STATUS_EN=1 Valid only for Input Pipes (IPA
* Consumer)
* @status_location: Location of PKT-STATUS on destination pipe.
* If set to 0 (default), PKT-STATUS will be appended before the packet
* for this endpoint. If set to 1, PKT-STATUS will be appended after the
* packet for this endpoint. Valid only for Output Pipes (IPA Producer)
*/
struct ipa3_ep_cfg_status {
bool status_en;
u8 status_ep;
bool status_location;
};
/**
* struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
* @rx_pkts_rcvd: Packets sent by wlan driver
@ -591,7 +712,7 @@ struct ipa3_ep_context {
dma_addr_t phys_base;
struct ipa_ep_cfg cfg;
struct ipa_ep_cfg_holb holb;
struct ipa3_ep_cfg_status status;
struct ipahal_reg_ep_cfg_status status;
u32 dst_pipe_index;
u32 rt_tbl_idx;
struct sps_connect connect;
@ -650,7 +771,6 @@ struct ipa_request_gsi_channel_params {
union __packed gsi_channel_scratch chan_scratch;
};
enum ipa3_sys_pipe_policy {
IPA_POLICY_INTR_MODE,
IPA_POLICY_NOINTR_MODE,
@ -683,6 +803,7 @@ struct ipa3_sys_context {
int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
void (*free_skb)(struct sk_buff *skb);
void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rk_pkt);
u32 rx_buff_sz;
u32 rx_pool_sz;
struct sk_buff *prev_skb;
@ -700,6 +821,7 @@ struct ipa3_sys_context {
/* ordering is important - mutable fields go above */
struct ipa3_ep_context *ep;
struct list_head head_desc_list;
struct list_head rcycl_list;
spinlock_t spinlock;
struct workqueue_struct *wq;
struct workqueue_struct *repl_wq;
@ -972,6 +1094,47 @@ enum ipa3_hw_features {
IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
};
/**
* enum ipa3_hw_2_cpu_events - Values that represent a HW event to be sent to
* the CPU. Values are scoped to the COMMON feature via FEATURE_ENUM_VAL.
* @IPA_HW_2_CPU_EVENT_ERROR : Event specifying that a system error was
* detected by the device
* @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
*/
enum ipa3_hw_2_cpu_events {
IPA_HW_2_CPU_EVENT_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_2_CPU_EVENT_LOG_INFO =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
};
/**
* enum ipa3_hw_errors - Common error types reported by the uC/HW.
* Values are scoped to the COMMON feature via FEATURE_ENUM_VAL.
* @IPA_HW_ERROR_NONE : No error persists
* @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
* @IPA_HW_DMA_ERROR : Unexpected DMA error
* @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
* @IPA_HW_INVALID_OPCODE : Invalid opcode sent
* @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
* @IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE : GSI channel stop failed while
* handling a consumer-pipe disable command (per enumerator name -
* NOTE(review): confirm against the uC interface spec)
* @IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE : GSI channel stop failed while
* handling a producer-pipe disable command (per enumerator name -
* NOTE(review): confirm against the uC interface spec)
*/
enum ipa3_hw_errors {
IPA_HW_ERROR_NONE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
IPA_HW_INVALID_DOORBELL_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_DMA_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
IPA_HW_FATAL_SYSTEM_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
IPA_HW_INVALID_OPCODE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
IPA_HW_ZIP_ENGINE_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7)
};
/**
* struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
* section in 128B shared memory located in offset zero of SW Partition in IPA
@ -1033,6 +1196,20 @@ union IpaHwFeatureInfoData_t {
u32 raw32b;
} __packed;
/**
* union IpaHwErrorEventData_t - HW->CPU Common Events
* @errorType : Entered when a system error is detected by the HW. Type of
* error is specified by IPA_HW_ERRORS (enum ipa3_hw_errors)
* @reserved : Reserved
* @raw32b : raw 32-bit view of the same word, overlaying the bit-fields
*/
union IpaHwErrorEventData_t {
struct IpaHwErrorEventParams_t {
u32 errorType:8;
u32 reserved:24;
} __packed params;
u32 raw32b;
} __packed;
/**
* struct IpaHwEventInfoData_t - Structure holding the parameters for
* statistics and config info
@ -1184,6 +1361,7 @@ union IpaHwMhiDlUlSyncCmdData_t {
* @uc_status: The last status provided by the uC
* @uc_zip_error: uC has notified the APPS upon a ZIP engine error
* @uc_error_type: error type from uC error event
* @uc_error_timestamp: tag timer sampled after uC crashed
*/
struct ipa3_uc_ctx {
bool uc_inited;
@ -1199,6 +1377,7 @@ struct ipa3_uc_ctx {
u32 uc_status;
bool uc_zip_error;
u32 uc_error_type;
u32 uc_error_timestamp;
};
/**
@ -1239,27 +1418,11 @@ struct ipa3cm_client_info {
enum ipacm_client_enum client_enum;
bool uplink;
};
/**
* struct ipa3_hash_tuple - Hash tuple members for flt and rt
* the fields tells if to be masked or not
* @src_id: pipe number for flt, table index for rt
* @src_ip_addr: IP source address
* @dst_ip_addr: IP destination address
* @src_port: L4 source port
* @dst_port: L4 destination port
* @protocol: IP protocol field
* @meta_data: packet meta-data
*
*/
struct ipa3_hash_tuple {
/* src_id: pipe in flt, tbl index in rt */
bool src_id;
bool src_ip_addr;
bool dst_ip_addr;
bool src_port;
bool dst_port;
bool protocol;
bool meta_data;
/*
 * struct ipa3_smp2p_info - SMP2P GPIO bookkeeping for IPA<->modem signaling.
 * @out_base_id: base gpio id of the outbound (IPA -> modem) smp2p bank
 * @in_base_id: base gpio id of the inbound (modem -> IPA) smp2p bank
 * @res_sent: presumably records that the resource/clock state was already
 *            signaled to the remote side - NOTE(review): confirm against
 *            the smp2p users; not visible from this header
 */
struct ipa3_smp2p_info {
u32 out_base_id;
u32 in_base_id;
bool res_sent;
};
/**
@ -1342,10 +1505,13 @@ struct ipa3_ready_cb_info {
* @use_ipa_teth_bridge: use tethering bridge driver
* @ipa_bam_remote_mode: ipa bam is in remote mode
* @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
* @logbuf: ipc log buffer for high priority messages
* @logbuf_low: ipc log buffer for low priority messages
* @ipa_bus_hdl: msm driver handle for the data path bus
* @ctrl: holds the core specific operations based on
* core version (vtable like)
* @enable_clock_scaling: clock scaling is enabled ?
* @enable_low_prio_print: enable low priority prints
* @curr_ipa_clk_rate: ipa3_clk current rate
* @wcstats: wlan common buffer stats
* @uc_ctx: uC interface context
@ -1417,6 +1583,7 @@ struct ipa3_context {
struct gen_pool *pipe_mem_pool;
struct dma_pool *dma_pool;
struct ipa3_active_clients ipa3_active_clients;
struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
struct workqueue_struct *power_mgmt_wq;
struct workqueue_struct *transport_power_mgmt_wq;
bool tag_process_before_gating;
@ -1439,6 +1606,8 @@ struct ipa3_context {
/* featurize if memory footprint becomes a concern */
struct ipa3_stats stats;
void *smem_pipe_mem;
void *logbuf;
void *logbuf_low;
u32 ipa_bus_hdl;
struct ipa3_controller *ctrl;
struct idr ipa_idr;
@ -1446,6 +1615,7 @@ struct ipa3_context {
struct device *uc_pdev;
spinlock_t idr_lock;
u32 enable_clock_scaling;
u32 enable_low_prio_print;
u32 curr_ipa_clk_rate;
bool q6_proxy_clk_vote_valid;
u32 ipa_num_pipes;
@ -1478,27 +1648,7 @@ struct ipa3_context {
bool ipa_initialization_complete;
struct list_head ipa_ready_cb_list;
struct completion init_completion_obj;
};
/**
* struct ipa3_route - IPA route
* @route_dis: route disable
* @route_def_pipe: route default pipe
* @route_def_hdr_table: route default header table
* @route_def_hdr_ofst: route default header offset table
* @route_frag_def_pipe: Default pipe to route fragmented exception
* packets and frag new rule statues, if source pipe does not have
* a notification status pipe defined.
* @route_def_retain_hdr: default value of retain header. It is used
* when no rule was hit
*/
struct ipa3_route {
u32 route_dis;
u32 route_def_pipe;
u32 route_def_hdr_table;
u32 route_def_hdr_ofst;
u8 route_frag_def_pipe;
u32 route_def_retain_hdr;
struct ipa3_smp2p_info smp2p_info;
};
/**
@ -1624,43 +1774,15 @@ struct ipa3_controller {
int (*ipa_init_rt6)(void);
int (*ipa_init_flt4)(void);
int (*ipa_init_flt6)(void);
void (*ipa3_cfg_ep_hdr)(u32 pipe_number,
const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg);
int (*ipa3_cfg_ep_hdr_ext)(u32 pipe_number,
const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg);
void (*ipa3_cfg_ep_aggr)(u32 pipe_number,
const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg);
int (*ipa3_cfg_ep_deaggr)(u32 pipe_index,
const struct ipa_ep_cfg_deaggr *ep_deaggr);
void (*ipa3_cfg_ep_nat)(u32 pipe_number,
const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg);
void (*ipa3_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number,
const struct ipa_ep_cfg_mode *ep_mode);
void (*ipa3_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index);
void (*ipa3_cfg_ep_holb)(u32 pipe_index,
const struct ipa_ep_cfg_holb *ep_holb);
void (*ipa3_cfg_route)(struct ipa3_route *route);
int (*ipa3_read_gen_reg)(char *buff, int max_len);
int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
void (*ipa3_write_dbg_cnt)(int option);
int (*ipa3_read_dbg_cnt)(char *buf, int max_len);
void (*ipa3_cfg_ep_status)(u32 clnt_hdl,
const struct ipa3_ep_cfg_status *ep_status);
int (*ipa3_commit_flt)(enum ipa_ip_type ip);
int (*ipa3_commit_rt)(enum ipa_ip_type ip);
int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip,
struct ipa3_rt_entry *entry, u8 *buf);
int (*ipa3_commit_hdr)(void);
void (*ipa3_cfg_ep_cfg)(u32 clnt_hdl,
const struct ipa_ep_cfg_cfg *cfg);
void (*ipa3_cfg_ep_metadata_mask)(u32 clnt_hdl,
const struct ipa_ep_cfg_metadata_mask *metadata_mask);
void (*ipa3_enable_clks)(void);
void (*ipa3_disable_clks)(void);
struct msm_bus_scale_pdata *msm_bus_data_ptr;
void (*ipa3_cfg_ep_metadata)(u32 pipe_number,
const struct ipa_ep_cfg_metadata *);
};
extern struct ipa3_context *ipa3_ctx;
@ -1697,36 +1819,9 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
bool should_force_clear, u32 qmi_req_id);
bool should_force_clear, u32 qmi_req_id, bool is_dpl);
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
/*
* USB
*/
int ipa3_usb_init(void);
int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data);
int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params);
int ipa3_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
int ipa3_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
int ipa3_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
/*
* Resume / Suspend
@ -1922,6 +2017,10 @@ int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
/*
* To de-register uC ready callback
*/
int ipa3_uc_dereg_rdyCB(void);
/*
* Resource manager
@ -2110,7 +2209,7 @@ void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
#endif
int ipa3_controller_static_bind(struct ipa3_controller *controller,
enum ipa_hw_type ipa_hw_type);
int ipa3_cfg_route(struct ipa3_route *route);
int ipa3_cfg_route(struct ipahal_reg_route *route);
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
int ipa3_cfg_filter(u32 disable);
int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
@ -2120,18 +2219,23 @@ int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
struct ipa3_context *ipa3_get_ctx(void);
void ipa3_enable_clks(void);
void ipa3_disable_clks(void);
void ipa3_inc_client_enable_clks(void);
int ipa3_inc_client_enable_clks_no_block(void);
void ipa3_dec_client_disable_clks(void);
void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id);
int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info
*id);
void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id);
void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id,
bool int_ctx);
void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
bool int_ctx);
int ipa3_active_clients_log_print_buffer(char *buf, int size);
int ipa3_active_clients_log_print_table(char *buf, int size);
void ipa3_active_clients_log_clear(void);
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
int __ipa3_del_rt_rule(u32 rule_hdl);
int __ipa3_del_hdr(u32 hdr_hdl);
int __ipa3_release_hdr(u32 hdr_hdl);
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
int _ipa_read_gen_reg_v3_0(char *buff, int max_len);
int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
void _ipa_write_dbg_cnt_v3_0(int option);
int _ipa_read_dbg_cnt_v3_0(char *buf, int max_len);
void _ipa_enable_clks_v3_0(void);
void _ipa_disable_clks_v3_0(void);
struct device *ipa3_get_dma_dev(void);
@ -2141,22 +2245,6 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
void *interrupt_data);
static inline u32 ipa_read_reg(void *base, u32 offset)
{
return ioread32(base + offset);
}
static inline u32 ipa_read_reg_field(void *base, u32 offset,
u32 mask, u32 shift)
{
return (ipa_read_reg(base, offset) & mask) >> shift;
}
static inline void ipa_write_reg(void *base, u32 offset, u32 val)
{
iowrite32(val, base + offset);
}
int ipa_bridge_init(void);
void ipa_bridge_cleanup(void);
@ -2204,9 +2292,7 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
u32 bandwidth_mbps);
int ipa3_cfg_ep_status(u32 clnt_hdl,
const struct ipa3_ep_cfg_status *ipa_ep_cfg);
int ipa3_cfg_aggr_cntr_granularity(u8 aggr_granularity);
int ipa3_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity);
const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
@ -2239,7 +2325,6 @@ int ipa3_uc_loaded_check(void);
void ipa3_uc_load_notify(void);
int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
bool polling_mode, unsigned long timeout_jiffies);
void ipa3_register_panic_hdlr(void);
void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
struct ipa3_uc_hdlrs *hdlrs);
int ipa3_create_nat_device(void);
@ -2263,9 +2348,9 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
void ipa3_tag_free_buf(void *user1, int user2);
void ipa3_tag_destroy_imm(void *user1, int user2);
struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
void ipa3_uc_rg10_write_reg(void *base, u32 offset, u32 val);
void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
u32 ipa3_get_num_pipes(void);
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
@ -2279,8 +2364,8 @@ int ipa3_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name);
int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple);
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple);
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_suspend_apps_pipes(bool suspend);
void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
@ -2298,10 +2383,14 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
struct ipa3_debugfs_rt_entry entry[],
int *num_entry);
int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib);
const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name);
int ipa3_restore_suspend_handler(void);
int ipa3_inject_dma_task_for_gsi(void);
int ipa3_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr);
void ipa3_inc_acquire_wakelock(void);
void ipa3_dec_release_wakelock(void);
int ipa3_load_fws(const struct firmware *firmware);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
#endif /* _IPA3_I_H_ */

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -111,20 +111,16 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context)
switch (interrupt_info.interrupt) {
case IPA_TX_SUSPEND_IRQ:
IPADBG("processing TX_SUSPEND interrupt work-around\n");
IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n");
ipa3_tx_suspend_interrupt_wa();
if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
suspend_data = ipa_read_reg(ipa3_ctx->mmio,
IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_0(ipa_ee));
IPADBG("get interrupt %d\n", suspend_data);
} else {
suspend_data = ipa_read_reg(ipa3_ctx->mmio,
IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_1(ipa_ee));
IPADBG("get interrupt %d\n", suspend_data);
suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n,
ipa_ee);
IPADBG_LOW("get interrupt %d\n", suspend_data);
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
/* Clearing L2 interrupts status */
ipa_write_reg(ipa3_ctx->mmio,
IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(ipa_ee),
suspend_data);
ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
ipa_ee, suspend_data);
}
if (!ipa3_is_valid_ep(suspend_data))
return 0;
@ -138,6 +134,20 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context)
suspend_interrupt_data->endpoints = suspend_data;
interrupt_data = suspend_interrupt_data;
break;
case IPA_UC_IRQ_0:
if (ipa3_ctx->apply_rg10_wa) {
/*
* Early detect of uC crash. If RG10 workaround is
* enable uC crash will not be detected as before
* processing uC event the interrupt is cleared using
* uC register write which times out as it crashed
* already.
*/
if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_ERROR)
ipa3_ctx->uc_ctx.uc_failed = true;
}
break;
default:
break;
}
@ -179,26 +189,25 @@ static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
u32 suspend_bmask;
int irq_num;
IPADBG("Enter\n");
IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
BUG_ON(irq_num == -1);
/* make sure ipa hw is clocked on*/
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
/*enable TX_SUSPEND_IRQ*/
en |= suspend_bmask;
IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
, en);
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_EN_EE_n_ADDR(ipa_ee), en);
ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en);
ipa3_process_interrupts(false);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("Exit\n");
IPADBG_LOW("Exit\n");
}
static void ipa3_tx_suspend_interrupt_wa(void)
@ -207,24 +216,23 @@ static void ipa3_tx_suspend_interrupt_wa(void)
u32 suspend_bmask;
int irq_num;
IPADBG("Enter\n");
IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
BUG_ON(irq_num == -1);
/*disable TX_SUSPEND_IRQ*/
val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
val &= ~suspend_bmask;
IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
val);
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
IPADBG(" processing suspend interrupt work-around, delayed work\n");
IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT));
IPADBG("Exit\n");
IPADBG_LOW("Exit\n");
}
static void ipa3_process_interrupts(bool isr_context)
@ -235,11 +243,11 @@ static void ipa3_process_interrupts(bool isr_context)
u32 en;
unsigned long flags;
IPADBG("Enter\n");
IPADBG_LOW("Enter\n");
spin_lock_irqsave(&suspend_wa_lock, flags);
en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
reg = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
while (en & reg) {
bmsk = 1;
for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
@ -257,26 +265,31 @@ static void ipa3_process_interrupts(bool isr_context)
}
bmsk = bmsk << 1;
}
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
reg = ipa_read_reg(ipa3_ctx->mmio,
IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
/*
* In case uC failed interrupt cannot be cleared.
* Device will crash as part of handling uC event handler.
*/
if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed)
break;
ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, ipa_ee, reg);
reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
/* since the suspend interrupt HW bug we must
* read again the EN register, otherwise the while is endless
*/
en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
}
spin_unlock_irqrestore(&suspend_wa_lock, flags);
IPADBG("Exit\n");
IPADBG_LOW("Exit\n");
}
static void ipa3_interrupt_defer(struct work_struct *work)
{
IPADBG("processing interrupts in wq\n");
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa3_process_interrupts(false);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("Done\n");
}
@ -284,7 +297,7 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
unsigned long flags;
IPADBG("Enter\n");
IPADBG_LOW("Enter\n");
/* defer interrupt handling in case IPA is not clocked on */
if (ipa3_active_clients_trylock(&flags) == 0) {
IPADBG("defer interrupt processing\n");
@ -299,7 +312,7 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt)
}
ipa3_process_interrupts(true);
IPADBG("Exit\n");
IPADBG_LOW("Exit\n");
bail:
ipa3_active_clients_trylock_unlock(&flags);
@ -346,13 +359,12 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
ipa_interrupt_to_cb[irq_num].private_data = private_data;
ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
bmsk = 1 << irq_num;
val |= bmsk;
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
IPADBG("wrote IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
@ -370,9 +382,8 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
val &= ~(1 << ep_idx);
}
ipa_write_reg(ipa3_ctx->mmio,
IPA_SUSPEND_IRQ_EN_EE_n_ADDR(ipa_ee), val);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n_ADDR reg = %d\n", val);
ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
}
return 0;
}
@ -411,16 +422,14 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
(ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)) {
ipa_write_reg(ipa3_ctx->mmio,
IPA_SUSPEND_IRQ_EN_EE_n_ADDR(ipa_ee), 0);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n_ADDR reg = %d\n", 0);
ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
}
val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
bmsk = 1 << irq_num;
val &= ~bmsk;
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
return 0;
}
@ -439,7 +448,6 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
{
int idx;
u32 reg = 0xFFFFFFFF;
int res = 0;
ipa_ee = ee;
@ -457,15 +465,6 @@ int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
return -ENOMEM;
}
/* Clearing interrupts status */
ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
/* Clearing L2 interrupts status */
if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)
ipa_write_reg(ipa3_ctx->mmio,
IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
IRQF_TRIGGER_RISING, "ipa", ipa_dev);
if (res) {
@ -499,13 +498,11 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
struct ipa3_interrupt_work_wrap *work_data;
struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
int irq_num;
int aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
IPA_STATE_AGGR_ACTIVE_OFST);
int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << clnt_hdl)) {
/* force close aggregation */
ipa_write_reg(ipa3_ctx->mmio, IPA_AGGR_FORCE_CLOSE_OFST,
(1 << clnt_hdl));
ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
/* simulate suspend IRQ */
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
@ -516,7 +513,7 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
}
suspend_interrupt_data = kzalloc(
sizeof(*suspend_interrupt_data),
GFP_KERNEL);
GFP_ATOMIC);
if (!suspend_interrupt_data) {
IPAERR("failed allocating suspend_interrupt_data\n");
return;
@ -524,7 +521,7 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
suspend_interrupt_data->endpoints = 1 << clnt_hdl;
work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
GFP_KERNEL);
GFP_ATOMIC);
if (!work_data) {
IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
goto fail_alloc_work;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -523,7 +523,7 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
list_del(&msg->link);
}
IPADBG("msg=%p\n", msg);
IPADBG_LOW("msg=%p\n", msg);
if (msg) {
locked = 0;

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1606,7 +1606,7 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
ch_props.ring_len = channel->ch_ctx_host.rlen;
ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
channel->ch_ctx_host.rbase);
ch_props.use_db_eng = GSI_CHAN_DB_MODE;
ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
ch_props.low_weight = 1;
ch_props.err_cb = ipa_mhi_gsi_ch_err_cb;
@ -2008,7 +2008,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
IPA_MHI_DBG("channel_context_addr 0x%llx\n",
channel->channel_context_addr);
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
res = ipa_mhi_start_gsi_channel(channel, ipa_ep_idx);
@ -2065,7 +2065,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
@ -2080,7 +2080,7 @@ fail_ep_cfg:
fail_enable_dp:
ipa3_mhi_reset_channel(channel);
fail_start_channel:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail_init_channel:
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
return -EPERM;
@ -2137,8 +2137,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
res = ipa3_mhi_reset_channel(channel);
if (res) {
IPA_MHI_ERR("ipa3_mhi_reset_channel failed %d\n", res);
@ -2155,8 +2154,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
ep->valid = 0;
ipa3_delete_dflt_flt_rules(clnt_hdl);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
@ -2164,7 +2162,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
fail_reset_channel:
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
@ -2494,8 +2492,7 @@ static bool ipa3_mhi_has_open_aggr_frame(void)
int i;
int ipa_ep_idx;
aggr_state_active = ipa_read_reg(ipa3_ctx->mmio,
IPA_STATE_AGGR_ACTIVE_OFST);
aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
IPA_MHI_DBG("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
@ -2608,7 +2605,8 @@ int ipa3_mhi_suspend(bool force)
* hold IPA clocks and release them after all
* IPA RM resource are released to make sure tag process will not start
*/
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPA_MHI_DBG("release prod\n");
res = ipa3_mhi_release_prod();
if (res) {
@ -2661,7 +2659,7 @@ int ipa3_mhi_suspend(bool force)
goto fail_release_cons;
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPA_MHI_FUNC_EXIT();
return 0;
@ -2671,7 +2669,7 @@ fail_suspend_dl_channel:
fail_release_cons:
ipa3_mhi_request_prod();
fail_release_prod:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
ipa3_mhi_resume_ul_channels(true);
ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -17,12 +17,11 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
#define IPA_NAT_PHYS_MEM_OFFSET 0
#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
#define IPA_NAT_SYSTEM_MEMORY 0
#define IPA_NAT_SHARED_MEMORY 1
#define IPA_NAT_TEMP_MEM_SIZE 128
static int ipa3_nat_vma_fault_remap(
@ -91,8 +90,8 @@ static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
}
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(
IPA_NAT_PHYS_MEM_OFFSET);
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
IPA_NAT_PHYS_MEM_OFFSET);
if (remap_pfn_range(
vma, vma->vm_start,
@ -310,10 +309,10 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
#define TBL_ENTRY_SIZE 32
#define INDX_TBL_ENTRY_SIZE 4
struct ipa3_register_write *reg_write_nop;
struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
struct ipa3_desc desc[2];
struct ipa3_ip_v4_nat_init *cmd;
u16 size = sizeof(struct ipa3_ip_v4_nat_init);
struct ipahal_imm_cmd_ip_v4_nat_init cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
int result;
u32 offset = 0;
size_t tmp;
@ -398,37 +397,28 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
if (!reg_write_nop) {
IPAERR("no mem\n");
nop_cmd_pyld =
ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
if (!nop_cmd_pyld) {
IPAERR("failed to construct NOP imm cmd\n");
result = -ENOMEM;
goto bail;
}
reg_write_nop->skip_pipeline_clear = 0;
reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_nop->value_mask = 0x0;
desc[0].opcode = IPA_REGISTER_WRITE;
desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
desc[0].pyld = (void *)reg_write_nop;
desc[0].len = sizeof(*reg_write_nop);
desc[0].pyld = nop_cmd_pyld->data;
desc[0].len = nop_cmd_pyld->len;
cmd = kmalloc(size, GFP_KERNEL);
if (!cmd) {
IPAERR("Failed to alloc immediate command object\n");
result = -ENOMEM;
goto free_nop;
}
if (ipa3_ctx->nat_mem.vaddr) {
IPADBG("using system memory for nat table\n");
cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
cmd.ipv4_rules_addr_shared = false;
cmd.ipv4_expansion_rules_addr_shared = false;
cmd.index_table_addr_shared = false;
cmd.index_table_expansion_addr_shared = false;
offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
@ -448,62 +438,70 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
IPAERR("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_mem;
goto free_nop;
}
cmd->ipv4_rules_addr =
cmd.ipv4_rules_addr =
ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
cmd->ipv4_expansion_rules_addr =
cmd.ipv4_expansion_rules_addr =
ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
cmd->index_table_addr =
cmd.index_table_addr =
ipa3_ctx->nat_mem.dma_handle + init->index_offset;
IPADBG("index_offset:0x%x\n", init->index_offset);
cmd->index_table_expansion_addr =
cmd.index_table_expansion_addr =
ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
} else {
IPADBG("using shared(local) memory for nat table\n");
cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
cmd.ipv4_rules_addr_shared = true;
cmd.ipv4_expansion_rules_addr_shared = true;
cmd.index_table_addr_shared = true;
cmd.index_table_expansion_addr_shared = true;
cmd->ipv4_rules_addr = init->ipv4_rules_offset +
cmd.ipv4_rules_addr = init->ipv4_rules_offset +
IPA_RAM_NAT_OFST;
cmd->ipv4_expansion_rules_addr = init->expn_rules_offset +
cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
IPA_RAM_NAT_OFST;
cmd->index_table_addr = init->index_offset +
cmd.index_table_addr = init->index_offset +
IPA_RAM_NAT_OFST;
cmd->index_table_expansion_addr = init->index_expn_offset +
cmd.index_table_expansion_addr = init->index_expn_offset +
IPA_RAM_NAT_OFST;
}
cmd->table_index = init->tbl_index;
IPADBG("Table index:0x%x\n", cmd->table_index);
cmd->size_base_tables = init->table_entries;
IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
cmd->size_expansion_tables = init->expn_table_entries;
IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
cmd->public_ip_addr = init->ip_addr;
IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
desc[1].opcode = IPA_IP_V4_NAT_INIT;
cmd.table_index = init->tbl_index;
IPADBG("Table index:0x%x\n", cmd.table_index);
cmd.size_base_tables = init->table_entries;
IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
cmd.size_expansion_tables = init->expn_table_entries;
IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
cmd.public_ip_addr = init->ip_addr;
IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto free_nop;
}
desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
desc[1].pyld = (void *)cmd;
desc[1].len = size;
desc[1].pyld = cmd_pyld->data;
desc[1].len = cmd_pyld->len;
IPADBG("posting v4 init command\n");
if (ipa3_send_cmd(2, desc)) {
IPAERR("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
goto destroy_imm_cmd;
}
ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
@ -538,10 +536,10 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
IPADBG("return\n");
result = 0;
free_mem:
kfree(cmd);
destroy_imm_cmd:
ipahal_destroy_imm_cmd(cmd_pyld);
free_nop:
kfree(reg_write_nop);
ipahal_destroy_imm_cmd(nop_cmd_pyld);
bail:
return result;
}
@ -558,8 +556,9 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
{
#define NUM_OF_DESC 2
struct ipa3_register_write *reg_write_nop = NULL;
struct ipa3_nat_dma *cmd = NULL;
struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
struct ipahal_imm_cmd_nat_dma cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_desc *desc = NULL;
u16 size = 0, cnt = 0;
int ret = 0;
@ -580,62 +579,53 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
goto bail;
}
size = sizeof(struct ipa3_nat_dma);
cmd = kzalloc(size, GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc memory\n");
ret = -ENOMEM;
goto bail;
}
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
if (!reg_write_nop) {
IPAERR("Failed to alloc memory\n");
nop_cmd_pyld =
ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
if (!nop_cmd_pyld) {
IPAERR("Failed to construct NOP imm cmd\n");
ret = -ENOMEM;
goto bail;
}
reg_write_nop->skip_pipeline_clear = 0;
reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_nop->value_mask = 0x0;
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].opcode = IPA_REGISTER_WRITE;
desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
desc[0].len = sizeof(*reg_write_nop);
desc[0].pyld = (void *)reg_write_nop;
desc[0].pyld = nop_cmd_pyld->data;
desc[0].len = nop_cmd_pyld->len;
for (cnt = 0; cnt < dma->entries; cnt++) {
cmd->table_index = dma->dma[cnt].table_index;
cmd->base_addr = dma->dma[cnt].base_addr;
cmd->offset = dma->dma[cnt].offset;
cmd->data = dma->dma[cnt].data;
cmd.table_index = dma->dma[cnt].table_index;
cmd.base_addr = dma->dma[cnt].base_addr;
cmd.offset = dma->dma[cnt].offset;
cmd.data = dma->dma[cnt].data;
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_NAT_DMA, &cmd, false);
if (!cmd_pyld) {
IPAERR("Fail to construct nat_dma imm cmd\n");
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].opcode = IPA_NAT_DMA;
desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
desc[1].len = sizeof(struct ipa3_nat_dma);
desc[1].pyld = (void *)cmd;
desc[1].pyld = cmd_pyld->data;
desc[1].len = cmd_pyld->len;
ret = ipa3_send_cmd(NUM_OF_DESC, desc);
if (ret == -EPERM)
IPAERR("Fail to send immediate command %d\n", cnt);
ipahal_destroy_imm_cmd(cmd_pyld);
}
bail:
if (cmd != NULL)
kfree(cmd);
if (desc != NULL)
kfree(desc);
if (reg_write_nop != NULL)
kfree(reg_write_nop);
if (nop_cmd_pyld != NULL)
ipahal_destroy_imm_cmd(nop_cmd_pyld);
return ret;
}
@ -677,18 +667,18 @@ void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
*/
int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
{
struct ipa3_register_write *reg_write_nop;
struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
struct ipa3_desc desc[2];
struct ipa3_ip_v4_nat_init *cmd;
u16 size = sizeof(struct ipa3_ip_v4_nat_init);
u8 mem_type = IPA_NAT_SHARED_MEMORY;
struct ipahal_imm_cmd_ip_v4_nat_init cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
bool mem_type_shared = true;
u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
int result;
IPADBG("\n");
if (ipa3_ctx->nat_mem.is_tmp_mem) {
IPAERR("using temp memory during nat del\n");
mem_type = IPA_NAT_SYSTEM_MEMORY;
mem_type_shared = false;
base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
}
@ -700,55 +690,52 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
if (!reg_write_nop) {
IPAERR("no mem\n");
nop_cmd_pyld =
ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
if (!nop_cmd_pyld) {
IPAERR("Failed to construct NOP imm cmd\n");
result = -ENOMEM;
goto bail;
}
reg_write_nop->skip_pipeline_clear = 0;
reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_nop->value_mask = 0x0;
desc[0].opcode = IPA_REGISTER_WRITE;
desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
desc[0].pyld = (void *)reg_write_nop;
desc[0].len = sizeof(*reg_write_nop);
desc[0].pyld = nop_cmd_pyld->data;
desc[0].len = nop_cmd_pyld->len;
cmd = kmalloc(size, GFP_KERNEL);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
result = -ENOMEM;
goto free_nop;
cmd.table_index = del->table_index;
cmd.ipv4_rules_addr = base_addr;
cmd.ipv4_rules_addr_shared = mem_type_shared;
cmd.ipv4_expansion_rules_addr = base_addr;
cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
cmd.index_table_addr = base_addr;
cmd.index_table_addr_shared = mem_type_shared;
cmd.index_table_expansion_addr = base_addr;
cmd.index_table_expansion_addr_shared = mem_type_shared;
cmd.size_base_tables = 0;
cmd.size_expansion_tables = 0;
cmd.public_ip_addr = 0;
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
cmd->table_index = del->table_index;
cmd->ipv4_rules_addr = base_addr;
cmd->ipv4_rules_addr_type = mem_type;
cmd->ipv4_expansion_rules_addr = base_addr;
cmd->ipv4_expansion_rules_addr_type = mem_type;
cmd->index_table_addr = base_addr;
cmd->index_table_addr_type = mem_type;
cmd->index_table_expansion_addr = base_addr;
cmd->index_table_expansion_addr_type = mem_type;
cmd->size_base_tables = 0;
cmd->size_expansion_tables = 0;
cmd->public_ip_addr = 0;
desc[1].opcode = IPA_IP_V4_NAT_INIT;
desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
desc[1].pyld = (void *)cmd;
desc[1].len = size;
desc[1].pyld = cmd_pyld->data;
desc[1].len = cmd_pyld->len;
if (ipa3_send_cmd(2, desc)) {
IPAERR("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
goto destroy_imm_cmd;
}
ipa3_ctx->nat_mem.size_base_tables = 0;
@ -762,10 +749,11 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
IPADBG("return\n");
result = 0;
free_mem:
kfree(cmd);
free_nop:
kfree(reg_write_nop);
destroy_imm_cmd:
ipahal_destroy_imm_cmd(cmd_pyld);
destroy_regwrt_imm_cmd:
ipahal_destroy_imm_cmd(nop_cmd_pyld);
bail:
return result;
}

View file

@ -590,11 +590,11 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
int rc;
/* check if the filter rules from IPACM is valid */
if (req->filter_spec_list_len == 0) {
if (req->filter_spec_ex_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
} else {
IPAWANDBG("IPACM pass %d rules to Q6\n",
req->filter_spec_list_len);
req->filter_spec_ex_list_len);
}
/* cache the qmi_filter_request */

View file

@ -1,318 +0,0 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __IPA_REG_H__
#define __IPA_REG_H__
/*
* IPA HW 3.1 Registers
*/
#define IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n) (0x00003034 + 0x1000 * (n))
#define IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(n) (0x00003038 + 0x1000 * (n))
/*
* End of IPA 3.1 Registers
*/
/*
Common Registers
*/
#define IPA_IRQ_STTS_EE_n_ADDR(n) (0x00003008 + 0x1000 * (n))
#define IPA_IRQ_EN_EE_n_ADDR(n) (0x0000300c + 0x1000 * (n))
#define IPA_IRQ_CLR_EE_n_ADDR(n) (0x00003010 + 0x1000 * (n))
#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_0(n) (0x00003098 + 0x1000 * (n))
#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_1(n) (0x00003030 + 0x1000 * (n))
#define IPA_BCR_OFST 0x000001D0
#define IPA_COUNTER_CFG_OFST 0x000001f0
#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF
#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0
#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0
#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4
#define IPA_ENABLED_PIPES_OFST 0x00000038
#define IPA_REG_BASE_OFST_v3_0 0x00040000
#define IPA_COMP_SW_RESET_OFST 0x00000040
#define IPA_VERSION_OFST 0x00000034
#define IPA_COMP_HW_VERSION_OFST 0x00000030
#define IPA_SPARE_REG_1_OFST (0x00005090)
#define IPA_SPARE_REG_2_OFST (0x00005094)
#define IPA_SHARED_MEM_SIZE_OFST_v3_0 0x00000054
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v3_0 0xffff0000
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v3_0 0x10
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v3_0 0xffff
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v3_0 0x0
#define IPA_ENDP_INIT_ROUTE_N_OFST_v3_0(n) (0x00000828 + 0x70 * (n))
#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f
#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0
#define IPA_ROUTE_OFST_v3_0 0x00000048
#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(n) (0x00007000 + 0x4 * (n))
#define IPA_COMP_CFG_OFST 0x0000003C
#define IPA_STATE_AGGR_ACTIVE_OFST 0x0000010C
#define IPA_AGGR_FORCE_CLOSE_OFST 0x000001EC
#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3FFFFFFF
#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
#define IPA_ENDP_INIT_AGGR_N_OFST_v3_0(n) (0x00000824 + 0x70 * (n))
#define IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
#define IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
#define IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_BMSK 0x400000
#define IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_SHFT 0x16
#define IPA_ENDP_INIT_AGGR_N_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
#define IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_BMSK 0x1f8000
#define IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_SHFT 0xf
#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00
#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa
#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0
#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5
#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c
#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2
#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3
#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0
#define IPA_ENDP_INIT_MODE_N_OFST_v3_0(n) (0x00000820 + 0x70 * (n))
#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v3_0 0x1f0
#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v3_0 0x4
#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7
#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0
#define IPA_ENDP_INIT_HDR_N_OFST_v3_0(n) (0x00000810 + 0x70 * (n))
#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f
#define IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0
#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000
#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7
#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80
#define IPA_ENDP_INIT_NAT_N_OFST_v3_0(n) (0x0000080C + 0x70 * (n))
#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3
#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v3_0(n) (0x00000814 + 0x70 * (n))
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
#define IPA_SINGLE_NDP_MODE_OFST 0x00000068
#define IPA_QCNCM_OFST 0x00000064
#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000800 + 0x70 * (n))
#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1
#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0
#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2
#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(n) (0x0000082c + 0x70 * (n))
#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19
#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0
#define IPA_ENDP_INIT_DEAGGR_n_OFST_v3_0(n) (0x00000834 + 0x70 * (n))
#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(n) (0x00000830 + 0x70 * (n))
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0
#define IPA_DEBUG_CNT_REG_N_OFST_v3_0(n) (0x00000600 + 0x4 * (n))
#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
#define IPA_DEBUG_CNT_REG_N_MAX 15
#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
#define IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(n) (0x00000640 + 0x4 * (n))
#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171
#define IPA_DEBUG_CNT_CTRL_N_MAX 15
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1
#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0
#define IPA_ENDP_STATUS_n_OFST(n) (0x00000840 + 0x70 * (n))
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x00000808 + 0x70 * (n))
#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f
#define IPA_ENDP_INIT_CFG_n_MAXn 19
#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000818 + 0x70 * (n))
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x0000081c + 0x70 * (n))
#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
#define IPA_ENDP_INIT_RSRC_GRP_n(n) (0x00000838 + 0x70 * (n))
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
#define IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n(n) (0x00000400 + 0x20 * (n))
#define IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n(n) (0x00000404 + 0x20 * (n))
#define IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n(n) (0x00000408 + 0x20 * (n))
#define IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n(n) (0x0000040C + 0x20 * (n))
#define IPA_DST_RSRC_GRP_01_RSRC_TYPE_n(n) (0x00000500 + 0x20 * (n))
#define IPA_DST_RSRC_GRP_23_RSRC_TYPE_n(n) (0x00000504 + 0x20 * (n))
#define IPA_DST_RSRC_GRP_45_RSRC_TYPE_n(n) (0x00000508 + 0x20 * (n))
#define IPA_DST_RSRC_GRP_67_RSRC_TYPE_n(n) (0x0000050c + 0x20 * (n))
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
#define IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 0x000023C4
#define IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 0x000023C8
#define IPA_RX_HPS_CLIENTS_MAX_DEPTH_0 0x000023CC
#define IPA_RX_HPS_CLIENTS_MAX_DEPTH_1 0x000023D0
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000301c + 0x1000 * (n))
#define IPA_UC_MAILBOX_m_n_OFFS_v3_0(m, n) (0x00032000 + 0x80 * (m) + 0x4 * (n))
#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000001e0)
#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000001e8)
#define IPA_FILT_ROUT_HASH_FLUSH_OFST (0x00000090)
#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT (12)
#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT (8)
#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT (4)
#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT (0)
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(n) (0x0000085C + 0x70 * (n))
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
#define IPA_ENDP_INIT_SEQ_n_OFST(n) (0x0000083C + 0x70*(n))
#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
#define IPA_ENDP_GSI_CFG_TLV_n_OFST(n) (0x850 + 0x70 * (n))
#define IPA_ENDP_GSI_CFG_AOS_n_OFST(n) (0x854 + 0x70 * (n))
#define IPA_ENDP_GSI_CFG2_n_OFST(n) (0x858 + 0x70 * (n))
#define IPA_ENDP_GSI_CFG1_n_OFST(n) (0x5504 + 0x4 * (n))
#define IPA_ENABLE_GSI_OFST 0x5500
#endif

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -20,6 +20,7 @@
static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_Q6_PROD),
__stringify(IPA_RM_RESOURCE_USB_PROD),
__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
__stringify(IPA_RM_RESOURCE_HSIC_PROD),
__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
@ -29,6 +30,7 @@ static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_MHI_PROD),
__stringify(IPA_RM_RESOURCE_Q6_CONS),
__stringify(IPA_RM_RESOURCE_USB_CONS),
__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
__stringify(IPA_RM_RESOURCE_HSIC_CONS),
__stringify(IPA_RM_RESOURCE_WLAN_CONS),
__stringify(IPA_RM_RESOURCE_APPS_CONS),
@ -394,6 +396,7 @@ bail:
return result;
}
/**
* ipa3_rm_release_resource() - release resource
* @resource_name: [in] name of the requested resource
@ -530,6 +533,9 @@ int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
unsigned long flags;
struct ipa_rm_resource *resource;
IPADBG("resource: %s ", ipa3_rm_resource_str(resource_name));
if (profile)
IPADBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
@ -662,14 +668,16 @@ static void ipa3_rm_wq_resume_handler(struct work_struct *work)
IPA_RM_ERR("resource is not CONS\n");
return;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa3_rm_resource_str(
ipa_rm_work->resource_name));
spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
ipa_rm_work->resource_name,
&resource) != 0){
IPA_RM_ERR("resource does not exists\n");
spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(
ipa_rm_work->resource_name));
goto bail;
}
ipa3_rm_resource_consumer_request_work(

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -70,22 +70,22 @@ static void ipa3_rm_inactivity_timer_func(struct work_struct *work)
work);
unsigned long flags;
IPADBG("%s: timer expired for resource %d!\n", __func__,
IPADBG_LOW("%s: timer expired for resource %d!\n", __func__,
me->resource_name);
spin_lock_irqsave(
&ipa3_rm_it_handles[me->resource_name].lock, flags);
if (ipa3_rm_it_handles[me->resource_name].reschedule_work) {
IPADBG("%s: setting delayed work\n", __func__);
IPADBG_LOW("%s: setting delayed work\n", __func__);
ipa3_rm_it_handles[me->resource_name].reschedule_work = false;
schedule_delayed_work(
&ipa3_rm_it_handles[me->resource_name].work,
ipa3_rm_it_handles[me->resource_name].jiffies);
} else if (ipa3_rm_it_handles[me->resource_name].resource_requested) {
IPADBG("%s: not calling release\n", __func__);
IPADBG_LOW("%s: not calling release\n", __func__);
ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
} else {
IPADBG("%s: calling release_resource on resource %d!\n",
IPADBG_LOW("%s: calling release_resource on resource %d!\n",
__func__, me->resource_name);
ipa3_rm_release_resource(me->resource_name);
ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
@ -110,7 +110,7 @@ static void ipa3_rm_inactivity_timer_func(struct work_struct *work)
int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
unsigned long msecs)
{
IPADBG("%s: resource %d\n", __func__, resource_name);
IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@ -150,7 +150,7 @@ int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
*/
int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
{
IPADBG("%s: resource %d\n", __func__, resource_name);
IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@ -190,7 +190,7 @@ int ipa3_rm_inactivity_timer_request_resource(
int ret;
unsigned long flags;
IPADBG("%s: resource %d\n", __func__, resource_name);
IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@ -207,7 +207,8 @@ int ipa3_rm_inactivity_timer_request_resource(
ipa3_rm_it_handles[resource_name].resource_requested = true;
spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);
ret = ipa3_rm_request_resource(resource_name);
IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret);
IPADBG_LOW("%s: resource %d: returning %d\n", __func__,
resource_name, ret);
return ret;
}
@ -232,7 +233,7 @@ int ipa3_rm_inactivity_timer_release_resource(
{
unsigned long flags;
IPADBG("%s: resource %d\n", __func__, resource_name);
IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@ -248,7 +249,7 @@ int ipa3_rm_inactivity_timer_release_resource(
spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags);
ipa3_rm_it_handles[resource_name].resource_requested = false;
if (ipa3_rm_it_handles[resource_name].work_in_progress) {
IPADBG("%s: Timer already set, not scheduling again %d\n",
IPADBG_LOW("%s: Timer already set, not scheduling again %d\n",
__func__, resource_name);
ipa3_rm_it_handles[resource_name].reschedule_work = true;
spin_unlock_irqrestore(
@ -257,7 +258,7 @@ int ipa3_rm_inactivity_timer_release_resource(
}
ipa3_rm_it_handles[resource_name].work_in_progress = true;
ipa3_rm_it_handles[resource_name].reschedule_work = false;
IPADBG("%s: setting delayed work\n", __func__);
IPADBG_LOW("%s: setting delayed work\n", __func__);
schedule_delayed_work(&ipa3_rm_it_handles[resource_name].work,
ipa3_rm_it_handles[resource_name].jiffies);
spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);

View file

@ -31,6 +31,7 @@ int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name)
switch (resource_name) {
case IPA_RM_RESOURCE_Q6_PROD:
case IPA_RM_RESOURCE_USB_PROD:
case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
case IPA_RM_RESOURCE_HSIC_PROD:
case IPA_RM_RESOURCE_STD_ECM_PROD:
case IPA_RM_RESOURCE_RNDIS_PROD:
@ -68,6 +69,7 @@ int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name)
case IPA_RM_RESOURCE_APPS_CONS:
case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
case IPA_RM_RESOURCE_MHI_CONS:
case IPA_RM_RESOURCE_USB_DPL_CONS:
break;
default:
result = IPA_RM_INDEX_INVALID;
@ -149,6 +151,7 @@ int ipa3_rm_resource_consumer_request(
{
int result = 0;
enum ipa3_rm_resource_state prev_state;
struct ipa3_active_client_logging_info log_info;
IPA_RM_DBG("%s state: %d\n",
ipa3_rm_resource_str(consumer->resource.name),
@ -161,8 +164,10 @@ int ipa3_rm_resource_consumer_request(
case IPA_RM_RELEASE_IN_PROGRESS:
reinit_completion(&consumer->request_consumer_in_progress);
consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
ipa3_rm_resource_str(consumer->resource.name));
if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
ipa3_inc_client_enable_clks_no_block() != 0) {
ipa3_inc_client_enable_clks_no_block(&log_info) != 0) {
IPA_RM_DBG("async resume work for %s\n",
ipa3_rm_resource_str(consumer->resource.name));
ipa3_rm_wq_send_resume_cmd(consumer->resource.name,

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/idr.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
#define IPA_RT_INDEX_BITMAP_SIZE (32)
#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
@ -110,7 +111,7 @@ int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
return -EPERM;
}
IPADBG("en_rule 0x%x\n", en_rule);
IPADBG_LOW("en_rule 0x%x\n", en_rule);
rule_hdr->u.hdr.en_rule = en_rule;
ipa3_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
@ -271,7 +272,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
if (tbl->prev_mem[i].phys_base) {
IPADBG(
IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
dma_free_coherent(ipa3_ctx->pdev,
@ -289,7 +290,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
WARN_ON(tbl->prev_mem[i].phys_base != 0);
if (tbl->curr_mem[i].phys_base) {
IPADBG(
IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
dma_free_coherent(ipa3_ctx->pdev,
@ -399,7 +400,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
return -EPERM;
}
IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
entry->id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@ -419,7 +420,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
if (tbl->sz[IPA_RULE_NON_HASHABLE])
tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
IPADBG_LOW("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
return 0;
@ -528,7 +529,7 @@ static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
}
ipa_get_rt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
IPADBG("total rt tbl local body sizes: hash %u nhash %u\n",
IPADBG_LOW("total rt tbl local body sizes: hash %u nhash %u\n",
hash_bdy_sz, nhash_bdy_sz);
hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
@ -634,8 +635,9 @@ static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
int __ipa_commit_rt_v3(enum ipa_ip_type ip)
{
struct ipa3_desc desc[5];
struct ipa3_register_write reg_write_cmd = {0};
struct ipa3_hw_imm_cmd_dma_shared_mem mem_cmd[4];
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
struct ipahal_imm_cmd_pyld *cmd_pyld[5];
int num_cmd = 0;
struct ipa3_mem_buffer hash_bdy, nhash_bdy;
struct ipa3_mem_buffer hash_hdr, nhash_hdr;
@ -644,9 +646,12 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
u32 lcl_hash_hdr, lcl_nhash_hdr;
u32 lcl_hash_bdy, lcl_nhash_bdy;
bool lcl_hash, lcl_nhash;
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
int i;
memset(desc, 0, sizeof(desc));
memset(mem_cmd, 0, sizeof(mem_cmd));
memset(cmd_pyld, 0, sizeof(cmd_pyld));
if (ip == IPA_IP_v4) {
num_modem_rt_index =
@ -700,64 +705,107 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
}
/* flushing ipa internal hashable rt rules cache */
reg_write_cmd.skip_pipeline_clear = 0;
reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
reg_write_cmd.offset = IPA_FILT_ROUT_HASH_FLUSH_OFST;
reg_write_cmd.value = (ip == IPA_IP_v4) ?
(1 << IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT) :
(1 << IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
reg_write_cmd.value_mask = reg_write_cmd.value;
desc[num_cmd].opcode = IPA_REGISTER_WRITE;
desc[num_cmd].pyld = &reg_write_cmd;
desc[num_cmd].len = sizeof(reg_write_cmd);
memset(&flush, 0, sizeof(flush));
if (ip == IPA_IP_v4)
flush.v4_rt = true;
else
flush.v6_rt = true;
ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
reg_write_cmd.skip_pipeline_clear = false;
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
reg_write_cmd.value = valmask.val;
reg_write_cmd.value_mask = valmask.mask;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
goto fail_size_valid;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd - 1].size = nhash_hdr.size;
mem_cmd[num_cmd - 1].system_addr = nhash_hdr.phys_base;
mem_cmd[num_cmd - 1].local_addr = lcl_nhash_hdr;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
desc[num_cmd].len = sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = nhash_hdr.size;
mem_cmd.system_addr = nhash_hdr.phys_base;
mem_cmd.local_addr = lcl_nhash_hdr;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd - 1].size = hash_hdr.size;
mem_cmd[num_cmd - 1].system_addr = hash_hdr.phys_base;
mem_cmd[num_cmd - 1].local_addr = lcl_hash_hdr;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
desc[num_cmd].len = sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = hash_hdr.size;
mem_cmd.system_addr = hash_hdr.phys_base;
mem_cmd.local_addr = lcl_hash_hdr;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
if (lcl_nhash) {
mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd - 1].size = nhash_bdy.size;
mem_cmd[num_cmd - 1].system_addr = nhash_bdy.phys_base;
mem_cmd[num_cmd - 1].local_addr = lcl_nhash_bdy;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = nhash_bdy.size;
mem_cmd.system_addr = nhash_bdy.phys_base;
mem_cmd.local_addr = lcl_nhash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
}
if (lcl_hash) {
mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
mem_cmd[num_cmd - 1].size = hash_bdy.size;
mem_cmd[num_cmd - 1].system_addr = hash_bdy.phys_base;
mem_cmd[num_cmd - 1].local_addr = lcl_hash_bdy;
desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
desc[num_cmd].len =
sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
mem_cmd.size = hash_bdy.size;
mem_cmd.system_addr = hash_bdy.phys_base;
mem_cmd.local_addr = lcl_hash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
ip);
goto fail_imm_cmd_construct;
}
desc[num_cmd].opcode =
ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
}
@ -765,13 +813,13 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
if (ipa3_send_cmd(num_cmd, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
goto fail_size_valid;
goto fail_imm_cmd_construct;
}
IPADBG("Hashable HEAD\n");
IPADBG_LOW("Hashable HEAD\n");
IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
IPADBG("Non-Hashable HEAD\n");
IPADBG_LOW("Non-Hashable HEAD\n");
IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
if (hash_bdy.size) {
@ -788,6 +836,9 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
__ipa_reap_sys_rt_tbls(ip);
fail_imm_cmd_construct:
for (i = 0 ; i < num_cmd ; i++)
ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_size_valid:
dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
hash_hdr.base, hash_hdr.phys_base);
@ -1717,48 +1768,6 @@ bail:
return result;
}
static u32 ipa3_build_rt_tuple_mask(struct ipa3_hash_tuple *tpl)
{
u32 msk = 0;
IPA_SETFIELD_IN_REG(msk, tpl->src_id,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->src_ip_addr,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->dst_ip_addr,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->src_port,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->dst_port,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->protocol,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK
);
IPA_SETFIELD_IN_REG(msk, tpl->meta_data,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK
);
return msk;
}
/**
* ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl
* table index must be for AP EP (not modem)
@ -1769,10 +1778,9 @@ static u32 ipa3_build_rt_tuple_mask(struct ipa3_hash_tuple *tpl)
* Returns: 0 on success, negative on failure
*
*/
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple)
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
{
u32 val;
u32 mask;
struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
if (!tuple) {
IPAERR("bad tuple\n");
@ -1799,19 +1807,11 @@ int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple)
return -EINVAL;
}
val = ipa_read_reg(ipa3_ctx->mmio,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(tbl_idx));
val &= 0x0000FFFF; /* clear 16 MSBs - rt bits */
mask = ipa3_build_rt_tuple_mask(tuple);
mask &= 0xFFFF0000;
val |= mask;
ipa_write_reg(ipa3_ctx->mmio,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(tbl_idx),
val);
ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
tbl_idx, &fltrt_tuple);
fltrt_tuple.rt = *tuple;
ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
tbl_idx, &fltrt_tuple);
return 0;
}
@ -1885,10 +1885,11 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
tbl_entry_in_hdr = ipa3_ctx->mmio +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) + tbl_entry_in_hdr_ofst;
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
tbl_entry_in_hdr_ofst;
/* for tables which reside in DDR access it from the virtual memory */
if (*tbl_entry_in_hdr & 0x0) {
if (!(*tbl_entry_in_hdr & 0x1)) {
/* system */
struct ipa3_rt_tbl_set *set;
struct ipa3_rt_tbl *tbl;

View file

@ -0,0 +1,135 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ipa
#define TRACE_INCLUDE_FILE ipa_trace
#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _IPA_TRACE_H
#include <linux/tracepoint.h>
TRACE_EVENT(
intr_to_poll3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
poll_to_intr3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_enter3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_exit3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
rmnet_ipa_netifni3,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
TRACE_EVENT(
rmnet_ipa_netifrx3,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -78,47 +78,6 @@ enum ipa3_hw_2_cpu_responses {
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
};
/**
* enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
* @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
* device
* @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
*/
enum ipa3_hw_2_cpu_events {
IPA_HW_2_CPU_EVENT_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_2_CPU_EVENT_LOG_INFO =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
};
/**
* enum ipa3_hw_errors - Common error types.
* @IPA_HW_ERROR_NONE : No error persists
* @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
* @IPA_HW_DMA_ERROR : Unexpected DMA error
* @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
* @IPA_HW_INVALID_OPCODE : Invalid opcode sent
* @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
*/
enum ipa3_hw_errors {
IPA_HW_ERROR_NONE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
IPA_HW_INVALID_DOORBELL_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_DMA_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
IPA_HW_FATAL_SYSTEM_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
IPA_HW_INVALID_OPCODE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
IPA_HW_ZIP_ENGINE_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7)
};
/**
* struct IpaHwResetPipeCmdData_t - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_MEMCPY command.
@ -180,20 +139,6 @@ union IpaHwCpuCmdCompletedResponseData_t {
u32 raw32b;
} __packed;
/**
* union IpaHwErrorEventData_t - HW->CPU Common Events
* @errorType : Entered when a system error is detected by the HW. Type of
* error is specified by IPA_HW_ERRORS
* @reserved : Reserved
*/
union IpaHwErrorEventData_t {
struct IpaHwErrorEventParams_t {
u32 errorType:8;
u32 reserved:24;
} __packed params;
u32 raw32b;
} __packed;
/**
* union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
* IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
@ -230,7 +175,7 @@ do { \
struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
static inline const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
{
const char *str;
@ -267,7 +212,7 @@ static void ipa3_log_evt_hdlr(void)
if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
sizeof(struct IpaHwEventLogInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_top 0x%x outside SRAM\n",
ipa3_ctx->uc_ctx.uc_event_top_ofst);
@ -352,7 +297,7 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
WARN_ON(private_data != ipa3_ctx);
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC evt opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
@ -363,7 +308,7 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
/* Feature specific handling */
@ -383,6 +328,8 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
IPAERR("IPA has encountered a ZIP engine error\n");
ipa3_ctx->uc_ctx.uc_zip_error = true;
}
ipa3_ctx->uc_ctx.uc_error_timestamp =
ipahal_read_reg(IPA_TAG_TIMER);
BUG();
} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_LOG_INFO) {
@ -393,14 +340,15 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
IPADBG("unsupported uC evt opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static int ipa3_uc_panic_notifier(struct notifier_block *this,
int ipa3_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
int result = 0;
struct ipa3_active_client_logging_info log_info;
IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
@ -408,7 +356,8 @@ static int ipa3_uc_panic_notifier(struct notifier_block *this,
if (result)
goto fail;
if (ipa3_inc_client_enable_clks_no_block())
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
if (ipa3_inc_client_enable_clks_no_block(&log_info))
goto fail;
ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
@ -418,32 +367,22 @@ static int ipa3_uc_panic_notifier(struct notifier_block *this,
wmb();
if (ipa3_ctx->apply_rg10_wa)
ipa_write_reg(ipa3_ctx->mmio,
IPA_UC_MAILBOX_m_n_OFFS_v3_0(IPA_CPU_2_HW_CMD_MBOX_m,
IPA_CPU_2_HW_CMD_MBOX_n), 0x1);
ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
IPA_CPU_2_HW_CMD_MBOX_m,
IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
else
ipa_write_reg(ipa3_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
/* give uc enough time to save state */
udelay(IPA_PKT_FLUSH_TO_US);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("err_fatal issued\n");
fail:
return NOTIFY_DONE;
}
static struct notifier_block ipa3_uc_panic_blk = {
.notifier_call = ipa3_uc_panic_notifier,
};
void ipa3_register_panic_hdlr(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&ipa3_uc_panic_blk);
}
static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
void *private_data,
void *interrupt_data)
@ -454,8 +393,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
int i;
WARN_ON(private_data != ipa3_ctx);
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC rsp opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
@ -464,7 +402,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
@ -477,7 +415,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
IPADBG("feature %d specific response handler\n",
feature);
complete_all(&ipa3_ctx->uc_ctx.uc_completion);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
}
@ -517,7 +455,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
IPAERR("Unsupported uC rsp opcode = %u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
@ -558,11 +496,11 @@ send_cmd:
wmb();
if (ipa3_ctx->apply_rg10_wa)
ipa_write_reg(ipa3_ctx->mmio,
IPA_UC_MAILBOX_m_n_OFFS_v3_0(IPA_CPU_2_HW_CMD_MBOX_m,
IPA_CPU_2_HW_CMD_MBOX_n), 0x1);
ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
IPA_CPU_2_HW_CMD_MBOX_m,
IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
else
ipa_write_reg(ipa3_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
if (polling_mode) {
for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
@ -661,7 +599,7 @@ int ipa3_uc_interface_init(void)
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0);
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0);
ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
IPA_RAM_UC_SMEM_SIZE);
if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
@ -721,7 +659,7 @@ void ipa3_uc_load_notify(void)
if (!ipa3_ctx->apply_rg10_wa)
return;
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa3_ctx->uc_ctx.uc_loaded = true;
IPADBG("IPA uC loaded\n");
@ -739,7 +677,7 @@ void ipa3_uc_load_notify(void)
if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
EXPORT_SYMBOL(ipa3_uc_load_notify);
@ -900,20 +838,21 @@ int ipa3_uc_update_hw_flags(u32 flags)
* to a register will be proxied by the uC due to H/W limitation.
* This func should be called for RG10 registers only
*
* @Parameters: Like ipa_write_reg() parameters
* @Parameters: Like ipahal_write_reg_n() parameters
*
*/
void ipa3_uc_rg10_write_reg(void *base, u32 offset, u32 val)
void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
{
int ret;
u32 paddr;
if (!ipa3_ctx->apply_rg10_wa)
return ipa_write_reg(base, offset, val);
return ipahal_write_reg_n(reg, n, val);
/* calculate register physical address */
paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst;
paddr += offset;
paddr += ipahal_get_reg_n_ofst(reg, n);
IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n",
paddr, val);

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -554,7 +554,7 @@ static void ipa3_uc_mhi_event_log_info_hdlr(
if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
sizeof(struct IpaHwStatsMhiInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
@ -629,7 +629,7 @@ int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_update_hw_flags(0);
if (res) {
@ -692,7 +692,7 @@ int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -715,7 +715,7 @@ int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
return -EINVAL;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@ -740,7 +740,7 @@ int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -755,8 +755,7 @@ int ipa3_uc_mhi_reset_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
@ -778,7 +777,7 @@ int ipa3_uc_mhi_reset_channel(int channelHandle)
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -792,8 +791,7 @@ int ipa3_uc_mhi_suspend_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
@ -815,7 +813,7 @@ int ipa3_uc_mhi_suspend_channel(int channelHandle)
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -829,8 +827,7 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
IPAERR("Not initialized\n");
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@ -853,7 +850,7 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -866,8 +863,7 @@ int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&cmd, 0, sizeof(cmd));
cmd.params.channelHandle = channelHandle;
@ -885,7 +881,7 @@ int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@ -903,7 +899,7 @@ int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
@ -914,7 +910,7 @@ int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
res = 0;
disable_clks:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -339,7 +339,7 @@ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
sizeof(struct IpaHwStatsWDIInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
@ -401,8 +401,7 @@ int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
return -EINVAL;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
TX_STATS(copy_engine_doorbell_value);
@ -444,7 +443,7 @@ int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
RX_STATS(reserved1);
RX_STATS(reserved2);
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@ -737,7 +736,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
if (IPA_CLIENT_IS_CONS(in->sys.client)) {
@ -825,10 +824,10 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
tx->num_tx_buffers = in->u.dl.num_tx_buffers;
tx->ipa_pipe_number = ipa_ep_idx;
out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
IPA_REG_BASE_OFST_v3_0 +
IPA_UC_MAILBOX_m_n_OFFS_v3_0(
IPA_HW_WDI_TX_MBOX_START_INDEX/32,
IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_TX_MBOX_START_INDEX/32,
IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
} else {
rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
@ -868,8 +867,8 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
rx->ipa_pipe_number = ipa_ep_idx;
out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
IPA_REG_BASE_OFST_v3_0 +
IPA_UC_MAILBOX_m_n_OFFS_v3_0(
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_RX_MBOX_START_INDEX/32,
IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
}
@ -921,7 +920,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
ep->wdi_state |= IPA_WDI_CONNECTED;
@ -935,7 +934,7 @@ uc_timeout:
ipa_release_uc_smmu_mappings(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
dma_alloc_fail:
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail:
return result;
}
@ -974,7 +973,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
tear.params.ipa_pipe_number = clnt_hdl;
@ -992,7 +991,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
ipa_release_uc_smmu_mappings(ep->client);
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@ -1033,8 +1032,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
enable.params.ipa_pipe_number = clnt_hdl;
result = ipa3_uc_send_cmd(enable.raw32b,
@ -1053,8 +1051,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
holb_cfg.tmr_val = 0;
result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
@ -1096,8 +1093,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
result = ipa3_disable_data_path(clnt_hdl);
if (result) {
@ -1149,8 +1145,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ep_cfg_ctrl.ipa_ep_delay = true;
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
@ -1191,8 +1186,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
resume.params.ipa_pipe_number = clnt_hdl;
result = ipa3_uc_send_cmd(resume.raw32b,
@ -1303,7 +1297,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
}
ipa3_ctx->tag_process_before_gating = true;
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
@ -1335,8 +1329,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
ipa3_inc_client_enable_clks();
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
qmap.params.ipa_pipe_number = clnt_hdl;
qmap.params.qmap_id = qmap_id;
@ -1349,8 +1342,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
result = -EFAULT;
goto uc_timeout;
}
ipa3_dec_client_disable_clks();
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
@ -1389,6 +1381,20 @@ int ipa3_uc_reg_rdyCB(
return 0;
}
/**
* ipa3_uc_dereg_rdyCB() - To de-register uC ready CB
*
* Returns: 0 on success, negative on failure
*
*/
int ipa3_uc_dereg_rdyCB(void)
{
ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
ipa3_ctx->uc_wdi_ctx.priv = NULL;
return 0;
}
/**
* ipa3_uc_wdi_get_dbpa() - To retrieve
@ -1411,14 +1417,14 @@ int ipa3_uc_wdi_get_dbpa(
if (IPA_CLIENT_IS_CONS(param->client)) {
param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
IPA_REG_BASE_OFST_v3_0 +
IPA_UC_MAILBOX_m_n_OFFS_v3_0(
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_TX_MBOX_START_INDEX/32,
IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
} else {
param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
IPA_REG_BASE_OFST_v3_0 +
IPA_UC_MAILBOX_m_n_OFFS_v3_0(
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_RX_MBOX_START_INDEX/32,
IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
}
@ -1433,9 +1439,16 @@ static void ipa3_uc_wdi_loaded_handler(void)
return;
}
if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb)
if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
ipa3_ctx->uc_wdi_ctx.priv);
ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
NULL;
ipa3_ctx->uc_wdi_ctx.priv = NULL;
}
return;
}
int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,3 @@
obj-$(CONFIG_IPA3) += ipa_hal.o
ipa_hal-y := ipahal.o ipahal_reg.o

View file

@ -0,0 +1,768 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "ipahal.h"
#include "ipahal_i.h"
#include "ipahal_reg_i.h"
/* Forward declaration: table-fixup pass run once from ipahal_init() */
static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type);
/* Global HAL context (hw_type + reg base); allocated in ipahal_init() */
struct ipahal_context *ipahal_ctx;
/*
 * Printable names for the immediate commands.
 * NOTE: order must match enum ipahal_imm_cmd_name exactly.
 */
static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
__stringify(IPA_IMM_CMD_REGISTER_WRITE),
__stringify(IPA_IMM_CMD_NAT_DMA),
__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
};
/* Zeroed allocation that may be called from atomic context (GFP_ATOMIC) */
#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
/*
 * Build the H/W payload of a DMA_TASK_32B_ADDR immediate command from
 * its abstracted parameter structure. Returns NULL on allocation failure.
 * Out-of-range fields are reported and WARN'd but still truncated into
 * the bitfields, matching the command's established behavior.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_dma_task_32b_addr *in = params;
	struct ipa_imm_cmd_hw_dma_task_32b_addr *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;

	/* size1 and packet_size occupy 16-bit fields in the H/W layout */
	if (unlikely(in->size1 & ~0xFFFF)) {
		IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
			in->size1);
		WARN_ON(1);
	}
	if (unlikely(in->packet_size & ~0xFFFF)) {
		IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
			in->packet_size);
		WARN_ON(1);
	}

	hw->cmplt = in->cmplt ? 1 : 0;
	hw->eof = in->eof ? 1 : 0;
	hw->flsh = in->flsh ? 1 : 0;
	hw->lock = in->lock ? 1 : 0;
	hw->unlock = in->unlock ? 1 : 0;
	hw->size1 = in->size1;
	hw->addr1 = in->addr1;
	hw->packet_size = in->packet_size;

	return pyld;
}
/*
 * Build the H/W payload of an IP_PACKET_TAG_STATUS immediate command.
 * The tag value is limited to 48 bits by the H/W field; larger values
 * are reported and WARN'd, then truncated by the bitfield assignment.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_packet_tag_status *in = params;
	struct ipa_imm_cmd_hw_ip_packet_tag_status *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;

	if (unlikely(in->tag & ~0xFFFFFFFFFFFF)) {
		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
			in->tag);
		WARN_ON(1);
	}
	hw->tag = in->tag;

	return pyld;
}
/*
 * Build the H/W payload of a DMA_SHARED_MEM immediate command.
 * Translates the abstracted pipeline-clear option into its H/W encoding;
 * an unknown option is reported and WARN'd (field stays zero-initialized).
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_dma_shared_mem *in = params;
	struct ipa_imm_cmd_hw_dma_shared_mem *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;

	/* size and local_addr occupy 16-bit fields in the H/W layout */
	if (unlikely(in->size & ~0xFFFF)) {
		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
			in->size);
		WARN_ON(1);
	}
	if (unlikely(in->local_addr & ~0xFFFF)) {
		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
			in->local_addr);
		WARN_ON(1);
	}

	hw->direction = in->is_read ? 1 : 0;
	hw->size = in->size;
	hw->local_addr = in->local_addr;
	hw->system_addr = in->system_addr;
	hw->skip_pipeline_clear = in->skip_pipeline_clear ? 1 : 0;
	switch (in->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		hw->pipeline_clear_options = 0;
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		hw->pipeline_clear_options = 1;
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		hw->pipeline_clear_options = 2;
		break;
	default:
		IPAHAL_ERR("unsupported pipline clear option %d\n",
			in->pipeline_clear_options);
		WARN_ON(1);
		break;
	}

	return pyld;
}
/*
 * Build the H/W payload of a REGISTER_WRITE immediate command.
 * Translates the abstracted pipeline-clear option into its H/W encoding;
 * an unknown option is reported and WARN'd (field stays zero-initialized).
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_register_write *in = params;
	struct ipa_imm_cmd_hw_register_write *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_register_write *)pyld->data;

	/* register offset occupies a 16-bit field in the H/W layout */
	if (unlikely(in->offset & ~0xFFFF)) {
		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
			in->offset);
		WARN_ON(1);
	}

	hw->offset = in->offset;
	hw->value = in->value;
	hw->value_mask = in->value_mask;
	hw->skip_pipeline_clear = in->skip_pipeline_clear ? 1 : 0;
	switch (in->pipeline_clear_options) {
	case IPAHAL_HPS_CLEAR:
		hw->pipeline_clear_options = 0;
		break;
	case IPAHAL_SRC_GRP_CLEAR:
		hw->pipeline_clear_options = 1;
		break;
	case IPAHAL_FULL_PIPELINE_CLEAR:
		hw->pipeline_clear_options = 2;
		break;
	default:
		IPAHAL_ERR("unsupported pipline clear option %d\n",
			in->pipeline_clear_options);
		WARN_ON(1);
		break;
	}

	return pyld;
}
/*
 * Build the H/W payload of an IP_PACKET_INIT immediate command.
 * The destination pipe index is limited to 5 bits by the H/W field.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_packet_init *in = params;
	struct ipa_imm_cmd_hw_ip_packet_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;

	if (unlikely(in->destination_pipe_index & ~0x1F)) {
		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
			in->destination_pipe_index);
		WARN_ON(1);
	}
	hw->destination_pipe_index = in->destination_pipe_index;

	return pyld;
}
/*
 * Build the H/W payload of a NAT_DMA immediate command: a direct
 * field-for-field copy of the abstracted parameters.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_nat_dma *in = params;
	struct ipa_imm_cmd_hw_nat_dma *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;

	hw->table_index = in->table_index;
	hw->base_addr = in->base_addr;
	hw->offset = in->offset;
	hw->data = in->data;

	return pyld;
}
/*
 * Build the H/W payload of a HDR_INIT_SYSTEM immediate command.
 * Only the system-memory header table address is carried.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_hdr_init_system *in = params;
	struct ipa_imm_cmd_hw_hdr_init_system *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;

	hw->hdr_table_addr = in->hdr_table_addr;

	return pyld;
}
/*
 * Build the H/W payload of a HDR_INIT_LOCAL immediate command.
 * The header table size is limited to 12 bits by the H/W field.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_hdr_init_local *in = params;
	struct ipa_imm_cmd_hw_hdr_init_local *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;

	if (unlikely(in->size_hdr_table & ~0xFFF)) {
		IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
			in->size_hdr_table);
		WARN_ON(1);
	}

	hw->hdr_table_addr = in->hdr_table_addr;
	hw->size_hdr_table = in->size_hdr_table;
	hw->hdr_addr = in->hdr_addr;

	return pyld;
}
/*
 * Build the H/W payload of an IP_V6_ROUTING_INIT immediate command:
 * addresses/sizes for the hashable and non-hashable IPv6 routing tables.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_v6_routing_init *in = params;
	struct ipa_imm_cmd_hw_ip_v6_routing_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;

	hw->hash_rules_addr = in->hash_rules_addr;
	hw->hash_rules_size = in->hash_rules_size;
	hw->hash_local_addr = in->hash_local_addr;
	hw->nhash_rules_addr = in->nhash_rules_addr;
	hw->nhash_rules_size = in->nhash_rules_size;
	hw->nhash_local_addr = in->nhash_local_addr;

	return pyld;
}
/*
 * Build the H/W payload of an IP_V4_ROUTING_INIT immediate command:
 * addresses/sizes for the hashable and non-hashable IPv4 routing tables.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_v4_routing_init *in = params;
	struct ipa_imm_cmd_hw_ip_v4_routing_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;

	hw->hash_rules_addr = in->hash_rules_addr;
	hw->hash_rules_size = in->hash_rules_size;
	hw->hash_local_addr = in->hash_local_addr;
	hw->nhash_rules_addr = in->nhash_rules_addr;
	hw->nhash_rules_size = in->nhash_rules_size;
	hw->nhash_local_addr = in->nhash_local_addr;

	return pyld;
}
/*
 * Build the H/W payload of an IP_V4_NAT_INIT immediate command.
 * The *_addr_type H/W fields encode whether each table address refers
 * to shared (1) or system (0) memory, derived from the *_shared flags.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_v4_nat_init *in = params;
	struct ipa_imm_cmd_hw_ip_v4_nat_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;

	hw->ipv4_rules_addr = in->ipv4_rules_addr;
	hw->ipv4_expansion_rules_addr =
		in->ipv4_expansion_rules_addr;
	hw->index_table_addr = in->index_table_addr;
	hw->index_table_expansion_addr =
		in->index_table_expansion_addr;
	hw->table_index = in->table_index;
	hw->ipv4_rules_addr_type =
		in->ipv4_rules_addr_shared ? 1 : 0;
	hw->ipv4_expansion_rules_addr_type =
		in->ipv4_expansion_rules_addr_shared ? 1 : 0;
	hw->index_table_addr_type =
		in->index_table_addr_shared ? 1 : 0;
	hw->index_table_expansion_addr_type =
		in->index_table_expansion_addr_shared ? 1 : 0;
	hw->size_base_tables = in->size_base_tables;
	hw->size_expansion_tables = in->size_expansion_tables;
	hw->public_ip_addr = in->public_ip_addr;

	return pyld;
}
/*
 * Build the H/W payload of an IP_V6_FILTER_INIT immediate command:
 * addresses/sizes for the hashable and non-hashable IPv6 filter tables.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_v6_filter_init *in = params;
	struct ipa_imm_cmd_hw_ip_v6_filter_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;

	hw->hash_rules_addr = in->hash_rules_addr;
	hw->hash_rules_size = in->hash_rules_size;
	hw->hash_local_addr = in->hash_local_addr;
	hw->nhash_rules_addr = in->nhash_rules_addr;
	hw->nhash_rules_size = in->nhash_rules_size;
	hw->nhash_local_addr = in->nhash_local_addr;

	return pyld;
}
/*
 * Build the H/W payload of an IP_V4_FILTER_INIT immediate command:
 * addresses/sizes for the hashable and non-hashable IPv4 filter tables.
 */
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_ip_v4_filter_init *in = params;
	struct ipa_imm_cmd_hw_ip_v4_filter_init *hw;
	struct ipahal_imm_cmd_pyld *pyld;

	/* pyld header and H/W payload share a single allocation */
	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*hw), is_atomic_ctx);
	if (unlikely(!pyld)) {
		IPAHAL_ERR("kzalloc err\n");
		return pyld;
	}
	pyld->len = sizeof(*hw);
	hw = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;

	hw->hash_rules_addr = in->hash_rules_addr;
	hw->hash_rules_size = in->hash_rules_size;
	hw->hash_local_addr = in->hash_local_addr;
	hw->nhash_rules_addr = in->nhash_rules_addr;
	hw->nhash_rules_size = in->nhash_rules_size;
	hw->nhash_local_addr = in->nhash_local_addr;

	return pyld;
}
/*
 * struct ipahal_imm_cmd_obj - immediate command H/W information for
 * specific IPA version
 * @construct - CB to construct imm command payload from abstracted structure
 * @opcode - Immediate command OpCode. Note: the "removed" sentinel -1 is
 * stored truncated to u16 (0xFFFF); see ipahal_imm_cmd_get_opcode().
 * @dyn_op - Does this command supports Dynamic opcode?
 * Some commands opcode are dynamic where the part of the opcode is
 * supplied as param. This flag indicates if the specific command supports it
 * or not.
 */
struct ipahal_imm_cmd_obj {
struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
const void *params, bool is_atomic_ctx);
u16 opcode;
bool dyn_op;
};
/*
 * This table contains the info regard each immediate command for IPAv3
 * and later.
 * Information like: opcode and construct functions.
 * All the information on the IMM on IPAv3 are statically defined below.
 * If information is missing regard some IMM on some IPA version,
 * the init function will fill it with the information from the previous
 * IPA version.
 * Information is considered missing if all of the fields are 0
 * If opcode is -1, this means that the IMM is removed on the
 * specific version
 *
 * NOTE(review): opcode values below presumably come from the IPAv3 H/W
 * command set specification - confirm against the HW spec when updating.
 */
static struct ipahal_imm_cmd_obj
ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
/* IPAv3 */
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
ipa_imm_cmd_construct_ip_v4_filter_init,
3, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
ipa_imm_cmd_construct_ip_v6_filter_init,
4, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
ipa_imm_cmd_construct_ip_v4_nat_init,
5, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
ipa_imm_cmd_construct_ip_v4_routing_init,
7, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
ipa_imm_cmd_construct_ip_v6_routing_init,
8, false},
[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
ipa_imm_cmd_construct_hdr_init_local,
9, false},
[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
ipa_imm_cmd_construct_hdr_init_system,
10, false},
[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
ipa_imm_cmd_construct_register_write,
12, false},
[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
ipa_imm_cmd_construct_nat_dma,
14, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
ipa_imm_cmd_construct_ip_packet_init,
16, false},
/* DMA_TASK_32B_ADDR is the only command with a dynamic opcode */
[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
ipa_imm_cmd_construct_dma_task_32b_addr,
17, true},
[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
ipa_imm_cmd_construct_dma_shared_mem,
19, false},
[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
ipa_imm_cmd_construct_ip_packet_tag_status,
20, false},
};
/*
 * ipahal_imm_cmd_init() - Build the Immediate command information table
 * See ipahal_imm_cmd_objs[][] comments
 *
 * Walks HW types from IPA_HW_v3_0 up to (excluding) @ipa_hw_type and
 * propagates each command's info forward: an all-zero entry inherits the
 * previous HW version's entry; an explicitly overridden entry is sanity
 * checked (must have a non-zero opcode and a construct callback).
 * Always returns 0; invalid overrides only WARN.
 */
static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
{
int i;
int j;
struct ipahal_imm_cmd_obj zero_obj;
IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
/* all-zero entry used to detect "missing" table slots */
memset(&zero_obj, 0, sizeof(zero_obj));
for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
sizeof(struct ipahal_imm_cmd_obj))) {
/* slot missing for HW type i+1: inherit from HW type i */
memcpy(&ipahal_imm_cmd_objs[i+1][j],
&ipahal_imm_cmd_objs[i][j],
sizeof(struct ipahal_imm_cmd_obj));
} else {
/*
 * explicitly overridden immediate command.
 * Check validity
 */
if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
IPAHAL_ERR(
"imm_cmd=%s with zero opcode\n",
ipahal_imm_cmd_name_str(j));
WARN_ON(1);
}
if (!ipahal_imm_cmd_objs[i+1][j].construct) {
IPAHAL_ERR(
"imm_cmd=%s with NULL construct fun\n",
ipahal_imm_cmd_name_str(j));
WARN_ON(1);
}
}
}
}
return 0;
}
/*
 * ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
 * @cmd_name: [in] Immediate command name
 *
 * Out-of-range values are reported and mapped to a fixed placeholder.
 */
const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
{
	if (cmd_name >= 0 && cmd_name < IPA_IMM_CMD_MAX)
		return ipahal_imm_cmd_name_to_str[cmd_name];

	IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
	return "Invalid IMM_CMD";
}
/*
* ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
*/
u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
{
u32 opcode;
if (cmd >= IPA_IMM_CMD_MAX) {
IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
WARN_ON(1);
return -EFAULT;
}
IPAHAL_DBG("Get opcode of IMM_CMD=%s\n", ipahal_imm_cmd_name_str(cmd));
opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
if (opcode == -1) {
IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
ipahal_imm_cmd_name_str(cmd));
WARN_ON(1);
return -EFAULT;
}
return opcode;
}
/*
* ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
* that supports dynamic opcode
* Some commands opcode are not totaly fixed, but part of it is
* a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
* is a given parameter.
* This API will return the composed opcode of the command given
* the parameter
* Note: Use this API only for immediate comamnds that support Dynamic Opcode
*/
u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
{
u32 opcode;
if (cmd >= IPA_IMM_CMD_MAX) {
IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
WARN_ON(1);
return -EFAULT;
}
IPAHAL_DBG("Get opcode of IMM_CMD=%s\n", ipahal_imm_cmd_name_str(cmd));
if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
ipahal_imm_cmd_name_str(cmd));
WARN_ON(1);
return -EFAULT;
}
/* Currently, dynamic opcode commands uses params to be set
* on the Opcode hi-byte (lo-byte is fixed).
* If this to be changed in the future, make the opcode calculation
* a CB per command
*/
if (param & ~0xFFFF) {
IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
ipahal_imm_cmd_name_str(cmd));
WARN_ON(1);
return -EFAULT;
}
opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
if (opcode == -1) {
IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
ipahal_imm_cmd_name_str(cmd));
WARN_ON(1);
return -EFAULT;
}
if (opcode & ~0xFFFF) {
IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
ipahal_imm_cmd_name_str(cmd));
WARN_ON(1);
return -EFAULT;
}
return (opcode + (param<<8));
}
/*
 * ipahal_construct_imm_cmd() - Construct immdiate command
 * This function builds imm cmd bulk that can be be sent to IPA
 * The command will be allocated dynamically.
 * After done using it, call ipahal_destroy_imm_cmd() to release it
 *
 * Dispatches to the per-command construct callback registered for the
 * current HW type. Returns NULL on invalid input.
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
{
	const struct ipahal_imm_cmd_obj *obj;

	if (!params) {
		IPAHAL_ERR("Input error: params=%p\n", params);
		WARN_ON(1);
		return NULL;
	}

	if (cmd >= IPA_IMM_CMD_MAX) {
		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
		WARN_ON(1);
		return NULL;
	}

	IPAHAL_DBG("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));

	obj = &ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd];
	return obj->construct(cmd, params, is_atomic_ctx);
}
/*
 * ipahal_construct_nop_imm_cmd() - Construct immediate comamnd for NO-Op
 * Core driver may want functionality to inject NOP commands to IPA
 * to ensure e.g., PIPLINE clear before someother operation.
 * The functionality given by this function can be reached by
 * ipahal_construct_imm_cmd(). This function is helper to the core driver
 * to reach this NOP functionlity easily.
 * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
 * @pipline_clr_opt: options for pipeline clear waiting
 * @is_atomic_ctx: is called in atomic context or can sleep?
 *
 * Implemented as a REGISTER_WRITE with a zero value mask, which writes
 * nothing but still carries the pipeline-clear semantics.
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
	bool skip_pipline_clear,
	enum ipahal_pipeline_clear_option pipline_clr_opt,
	bool is_atomic_ctx)
{
	struct ipahal_imm_cmd_pyld *nop_pyld;
	struct ipahal_imm_cmd_register_write reg_write = { 0 };

	reg_write.skip_pipeline_clear = skip_pipline_clear;
	reg_write.pipeline_clear_options = pipline_clr_opt;
	reg_write.value_mask = 0x0;

	nop_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&reg_write, is_atomic_ctx);
	if (!nop_pyld)
		IPAHAL_ERR("failed to construct register_write imm cmd\n");

	return nop_pyld;
}
/*
 * ipahal_init() - Initialize the IPA HAL layer
 * @ipa_hw_type: IPA H/W type/version (must be IPAv3 or later)
 * @base: io-mapped address of the IPA register space
 *
 * Allocates the HAL context, validates the inputs, then initializes the
 * register and immediate-command sub-modules.
 * Returns 0 on success; on failure the context is freed and a negative
 * errno is returned.
 */
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base)
{
	int rc;

	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p\n",
		ipa_hw_type, base);

	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
	if (!ipahal_ctx) {
		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
		rc = -ENOMEM;
		goto bail_err_exit;
	}

	if (ipa_hw_type < IPA_HW_v3_0) {
		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
		rc = -EINVAL;
		goto bail_free_ctx;
	}

	if (!base) {
		IPAHAL_ERR("invalid memory io mapping addr\n");
		rc = -EINVAL;
		goto bail_free_ctx;
	}

	ipahal_ctx->hw_type = ipa_hw_type;
	ipahal_ctx->base = base;

	if (ipahal_reg_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal reg\n");
		rc = -EFAULT;
		goto bail_free_ctx;
	}

	if (ipahal_imm_cmd_init(ipa_hw_type)) {
		IPAHAL_ERR("failed to init ipahal imm cmd\n");
		rc = -EFAULT;
		goto bail_free_ctx;
	}

	return 0;

bail_free_ctx:
	kfree(ipahal_ctx);
	ipahal_ctx = NULL;
bail_err_exit:
	return rc;
}
/*
 * ipahal_destroy() - Tear down the IPA HAL library
 * Releases the HAL global context allocated by ipahal_init().
 * kfree(NULL) is a no-op, so this is safe even if init never succeeded.
 */
void ipahal_destroy(void)
{
	IPAHAL_DBG("Entry\n");

	kfree(ipahal_ctx);
	ipahal_ctx = NULL;
}

View file

@ -0,0 +1,395 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _IPAHAL_H_
#define _IPAHAL_H_
#include <linux/msm_ipa.h>
/*
 * Immediate command names
 *
 * NOTE: Any change to this enum requires a matching change to the
 * ipahal_imm_cmd_name_to_str array as well.
 */
enum ipahal_imm_cmd_name {
IPA_IMM_CMD_IP_V4_FILTER_INIT,
IPA_IMM_CMD_IP_V6_FILTER_INIT,
IPA_IMM_CMD_IP_V4_NAT_INIT,
IPA_IMM_CMD_IP_V4_ROUTING_INIT,
IPA_IMM_CMD_IP_V6_ROUTING_INIT,
IPA_IMM_CMD_HDR_INIT_LOCAL,
IPA_IMM_CMD_HDR_INIT_SYSTEM,
IPA_IMM_CMD_REGISTER_WRITE,
IPA_IMM_CMD_NAT_DMA,
IPA_IMM_CMD_IP_PACKET_INIT,
IPA_IMM_CMD_DMA_SHARED_MEM,
IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
IPA_IMM_CMD_DMA_TASK_32B_ADDR,
IPA_IMM_CMD_MAX,
};
/* Immediate commands abstracted structures */
/*
 * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
 * Inits IPv4 filter block.
 * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
 * be copied to
 * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
 * be copied to
 */
struct ipahal_imm_cmd_ip_v4_filter_init {
u64 hash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
 * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
 * Inits IPv6 filter block.
 * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
 * be copied to
 * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
 * be copied to
 */
struct ipahal_imm_cmd_ip_v6_filter_init {
u64 hash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
 * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
 * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
 * location, cache address and other related parameters.
 * @table_index: For future support of multiple NAT tables
 * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
 * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
 * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
 * table starts. IPv4 NAT rules that result in NAT collision are located
 * in this table.
 * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
 * shared mem (if not, then sys)
 * @index_table_addr: Addr in sys/shared mem where index table, which points
 * to NAT table starts
 * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
 * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
 * table starts
 * @index_table_expansion_addr_shared: index_table_expansion_addr in
 * shared mem (if not, then sys)
 * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
 * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
 * idx tbl (each)
 * @public_ip_addr: public IP address
 */
struct ipahal_imm_cmd_ip_v4_nat_init {
u8 table_index;
u64 ipv4_rules_addr;
bool ipv4_rules_addr_shared;
u64 ipv4_expansion_rules_addr;
bool ipv4_expansion_rules_addr_shared;
u64 index_table_addr;
bool index_table_addr_shared;
u64 index_table_expansion_addr;
bool index_table_expansion_addr_shared;
u16 size_base_tables;
u16 size_expansion_tables;
u32 public_ip_addr;
};
/*
 * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
 * Inits IPv4 routing table/structure - with the rules and other related params
 * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
 * be copied to
 * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
 * be copied to
 */
struct ipahal_imm_cmd_ip_v4_routing_init {
u64 hash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
 * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
 * Inits IPv6 routing table/structure - with the rules and other related params
 * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
 * be copied to
 * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
 * be copied to
 */
struct ipahal_imm_cmd_ip_v6_routing_init {
u64 hash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
 * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
 * Inits hdr table within local mem with the hdrs and their length.
 * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
 * @size_hdr_table: Size of the above (in bytes)
 * @hdr_addr: header address in IPA sram (used as DST for memory copy)
 */
struct ipahal_imm_cmd_hdr_init_local {
u64 hdr_table_addr;
u32 size_hdr_table;
u32 hdr_addr;
};
/*
 * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
 * Inits hdr table within sys mem with the hdrs and their length.
 * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
 */
struct ipahal_imm_cmd_hdr_init_system {
u64 hdr_table_addr;
};
/*
 * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
 * Perform DMA operation on NAT related mem addresses. Copy data into
 * different locations within NAT associated tbls. (For add/remove NAT rules)
 * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
 * @base_addr: Base addr to which the DMA operation should be performed.
 * @offset: offset in bytes from base addr to write 'data' to
 * @data: data to be written
 */
struct ipahal_imm_cmd_nat_dma {
u8 table_index;
u8 base_addr;
u32 offset;
u16 data;
};
/*
 * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
 * Configuration for specific IP pkt. Shall be called prior to an IP pkt
 * data. Pkt will not go through IP pkt processing.
 * @destination_pipe_index: Destination pipe index (in case routing
 * is enabled, this field will overwrite the rt rule)
 */
struct ipahal_imm_cmd_ip_packet_init {
u32 destination_pipe_index;
};
/*
 * enum ipahal_pipeline_clear_option - Values for pipeline clear waiting options
 * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
 * shall not be serviced until HPS is clear of packets or immediate commands.
 * The high priority Rx queue / Q6ZIP group shall still be serviced normally.
 *
 * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
 * (for no packet contexts allocated to the originating source group).
 * The source group / Rx queue shall not be serviced until all previously
 * allocated packet contexts are released. All other source groups/queues shall
 * be serviced normally.
 *
 * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
 * All groups / Rx queues shall not be serviced until IPA pipeline is fully
 * clear. This should be used for debug only.
 */
enum ipahal_pipeline_clear_option {
IPAHAL_HPS_CLEAR,
IPAHAL_SRC_GRP_CLEAR,
IPAHAL_FULL_PIPELINE_CLEAR
};
/*
 * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
 * Write value to register. Allows reg changes to be synced with data packet
 * and other immediate commands. Can be used to access the sram
 * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
 * @value: value to write to register
 * @value_mask: mask specifying which value bits to write to the register
 * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
 * @pipeline_clear_options: options for pipeline clear waiting
 */
struct ipahal_imm_cmd_register_write {
u32 offset;
u32 value;
u32 value_mask;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
};
/*
 * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
 * Perform mem copy into or out of the SW area of IPA local mem
 * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
 * @local_addr: Address in IPA local memory
 * @is_read: Read operation from local memory? If not, then write.
 * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
 * @pipeline_clear_options: options for pipeline clear waiting
 * @system_addr: Address in system memory
 */
struct ipahal_imm_cmd_dma_shared_mem {
u32 size;
u32 local_addr;
bool is_read;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
u64 system_addr;
};
/*
 * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
 * This cmd is used to allow SW to track HW processing by setting a TAG
 * value that is passed back to SW inside Packet Status information.
 * TAG info will be provided as part of Packet Status info generated for
 * the next pkt transferred over the pipe.
 * This immediate command must be followed by a packet in the same transfer.
 * @tag: Tag that is provided back to SW
 */
struct ipahal_imm_cmd_ip_packet_tag_status {
u64 tag;
};
/*
 * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
 * Used by clients using 32bit addresses. Used to perform DMA operation on
 * multiple descriptors.
 * The Opcode is dynamic, where it holds the number of buffer to process
 * @cmplt: Complete flag: If true, IPA interrupt SW when the entire
 * DMA related data was completely xfered to its destination.
 * @eof: End Of Frame flag: If true, IPA assert the EOT to the
 * dest client. This is used for aggr sequence
 * @flsh: Flush flag: If true pkt will go through the IPA blocks but
 * will not be xfered to dest client but rather will be discarded
 * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
 * from other EPs in the same src grp (RX queue)
 * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
 * servicing current EP out of the src EPs of the grp (RX queue)
 * @size1: Size of buffer1 data
 * @addr1: Pointer to buffer1 data
 * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs,
 * only the first one needs to have this field set. It will be ignored
 * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
 * must contain this field (2 or more buffers) or EOT.
 */
struct ipahal_imm_cmd_dma_task_32b_addr {
bool cmplt;
bool eof;
bool flsh;
bool lock;
bool unlock;
u32 size1;
u32 addr1;
u32 packet_size;
};
/*
 * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
 * @len: length of the buffer
 * @data: buffer contains the immediate command payload. Buffer goes
 * back to back with this structure
 */
struct ipahal_imm_cmd_pyld {
u16 len;
u8 data[0];
};
/* Immediate command Function APIs */
/*
 * ipahal_imm_cmd_name_str() - returns string that represents the imm cmd
 * @cmd_name: [in] Immediate command name
 */
const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
/*
 * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
 */
u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
/*
 * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
 * that supports dynamic opcode
 * Some commands opcode are not totally fixed, but part of it is
 * a supplied parameter. E.g. Low-Byte is fixed and Hi-Byte
 * is a given parameter.
 * This API will return the composed opcode of the command given
 * the parameter
 * Note: Use this API only for immediate commands that support Dynamic Opcode
 */
u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
/*
 * ipahal_construct_imm_cmd() - Construct immediate command
 * This function builds imm cmd bulk that can be sent to IPA
 * The command will be allocated dynamically.
 * After done using it, call ipahal_destroy_imm_cmd() to release it
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
/*
 * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
 * Core driver may want functionality to inject NOP commands to IPA
 * to ensure e.g., PIPELINE clear before some other operation.
 * The functionality given by this function can be reached by
 * ipahal_construct_imm_cmd(). This function is a helper to the core driver
 * to reach this NOP functionality easily.
 * @skip_pipline_clear: if to skip pipeline clear waiting (don't wait)
 * @pipline_clr_opt: options for pipeline clear waiting
 * @is_atomic_ctx: is called in atomic context or can sleep?
 */
struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
bool skip_pipline_clear,
enum ipahal_pipeline_clear_option pipline_clr_opt,
bool is_atomic_ctx);
/*
 * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
 * by the construction functions
 */
static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
{
kfree(pyld);
}
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base);
void ipahal_destroy(void);
#endif /* _IPAHAL_H_ */

View file

@ -0,0 +1,351 @@
/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _IPAHAL_I_H_
#define _IPAHAL_I_H_
#define IPAHAL_DRV_NAME "ipahal"
/* Debug/error print helpers: prefix messages with driver name and location */
#define IPAHAL_DBG(fmt, args...) \
pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPAHAL_ERR(fmt, args...) \
pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
/*
 * struct ipahal_context - HAL global context data
 * @hw_type: IPA H/W type/version.
 * @base: Base address to be used for accessing IPA memory. This is
 * I/O memory mapped address.
 */
struct ipahal_context {
enum ipa_hw_type hw_type;
void __iomem *base;
};
extern struct ipahal_context *ipahal_ctx;
/* Immediate commands H/W structures */
/*
 * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
 * in H/W format.
 * Inits IPv4 filter block.
 * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
 * be copied to
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
 * be copied to
 * @rsvd: reserved
 * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
 */
struct ipa_imm_cmd_hw_ip_v4_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
 * in H/W format.
 * Inits IPv6 filter block.
 * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
 * be copied to
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
 * be copied to
 * @rsvd: reserved
 * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
 */
struct ipa_imm_cmd_hw_ip_v6_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
 * in H/W format.
 * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
 * location, cache address and other related parameters.
 * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
 * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
 * table starts. IPv4 NAT rules that result in NAT collision are located
 * in this table.
 * @index_table_addr: Addr in sys/shared mem where index table, which points
 * to NAT table starts
 * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
 * table starts
 * @table_index: For future support of multiple NAT tables
 * @rsvd1: reserved
 * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
 * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
 * sys or shared mem
 * @index_table_addr_type: index_table_addr in sys or shared mem
 * @index_table_expansion_addr_type: index_table_expansion_addr in
 * sys or shared mem
 * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
 * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
 * idx tbl (each)
 * @rsvd2: reserved
 * @public_ip_addr: public IP address
 */
struct ipa_imm_cmd_hw_ip_v4_nat_init {
u64 ipv4_rules_addr:64;
u64 ipv4_expansion_rules_addr:64;
u64 index_table_addr:64;
u64 index_table_expansion_addr:64;
u64 table_index:3;
u64 rsvd1:1;
u64 ipv4_rules_addr_type:1;
u64 ipv4_expansion_rules_addr_type:1;
u64 index_table_addr_type:1;
u64 index_table_expansion_addr_type:1;
u64 size_base_tables:12;
u64 size_expansion_tables:10;
u64 rsvd2:2;
u64 public_ip_addr:32;
};
/*
 * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
 * in H/W format.
 * Inits IPv4 routing table/structure - with the rules and other related params
 * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
 * be copied to
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
 * be copied to
 * @rsvd: reserved
 * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
 */
struct ipa_imm_cmd_hw_ip_v4_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
 * in H/W format.
 * Inits IPv6 routing table/structure - with the rules and other related params
 * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
 * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
 * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
 * be copied to
 * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
 * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
 * be copied to
 * @rsvd: reserved
 * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
 */
struct ipa_imm_cmd_hw_ip_v6_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
 * in H/W format.
 * Inits hdr table within local mem with the hdrs and their length.
 * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
 * @size_hdr_table: Size of the above (in bytes)
 * @hdr_addr: header address in IPA sram (used as DST for memory copy)
 * @rsvd: reserved
 */
struct ipa_imm_cmd_hw_hdr_init_local {
u64 hdr_table_addr:64;
u64 size_hdr_table:12;
u64 hdr_addr:16;
u64 rsvd:4;
};
/*
 * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
 * in H/W format
 * Perform DMA operation on NAT related mem addresses. Copy data into
 * different locations within NAT associated tbls. (For add/remove NAT rules)
 * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
 * @rsvd1: reserved
 * @base_addr: Base addr to which the DMA operation should be performed.
 * @rsvd2: reserved
 * @offset: offset in bytes from base addr to write 'data' to
 * @data: data to be written
 * @rsvd3: reserved
 */
struct ipa_imm_cmd_hw_nat_dma {
u64 table_index:3;
u64 rsvd1:1;
u64 base_addr:2;
u64 rsvd2:2;
u64 offset:32;
u64 data:16;
u64 rsvd3:8;
};
/*
 * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
 * in H/W format.
 * Inits hdr table within sys mem with the hdrs and their length.
 * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
 */
struct ipa_imm_cmd_hw_hdr_init_system {
u64 hdr_table_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
 * in H/W format.
 * Configuration for specific IP pkt. Shall be called prior to an IP pkt
 * data. Pkt will not go through IP pkt processing.
 * @destination_pipe_index: Destination pipe index (in case routing
 * is enabled, this field will overwrite the rt rule)
 * @rsv1: reserved
 */
struct ipa_imm_cmd_hw_ip_packet_init {
u64 destination_pipe_index:5;
u64 rsv1:59;
};
/*
 * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
 * in H/W format.
 * Write value to register. Allows reg changes to be synced with data packet
 * and other immediate command. Can be used to access the sram
 * @sw_rsvd: Ignored by H/W. May be used by S/W
 * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
 * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
 * @value: value to write to register
 * @value_mask: mask specifying which value bits to write to the register
 * @pipeline_clear_options: options for pipeline to clear
 * 0: HPS - no pkt inside HPS (not grp specific)
 * 1: source group - The immediate cmd src grp does not use any pkt ctxs
 * 2: Wait until no pkt reside inside IPA pipeline
 * 3: reserved
 * @rsvd: reserved - should be set to zero
 */
struct ipa_imm_cmd_hw_register_write {
u64 sw_rsvd:15;
u64 skip_pipeline_clear:1;
u64 offset:16;
u64 value:32;
u64 value_mask:32;
u64 pipeline_clear_options:2;
u64 rsvd:30;
};
/*
 * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
 * in H/W format.
 * Perform mem copy into or out of the SW area of IPA local mem
 * @sw_rsvd: Ignored by H/W. May be used by S/W
 * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
 * @local_addr: Address in IPA local memory
 * @direction: Read or write?
 * 0: IPA write, Write to local address from system address
 * 1: IPA read, Read from local address to system address
 * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
 * @pipeline_clear_options: options for pipeline to clear
 * 0: HPS - no pkt inside HPS (not grp specific)
 * 1: source group - The immediate cmd src grp does not use any pkt ctxs
 * 2: Wait until no pkt reside inside IPA pipeline
 * 3: reserved
 * @rsvd: reserved - should be set to zero
 * @system_addr: Address in system memory
 */
struct ipa_imm_cmd_hw_dma_shared_mem {
u64 sw_rsvd:16;
u64 size:16;
u64 local_addr:16;
u64 direction:1;
u64 skip_pipeline_clear:1;
u64 pipeline_clear_options:2;
u64 rsvd:12;
u64 system_addr:64;
};
/*
 * struct ipa_imm_cmd_hw_ip_packet_tag_status -
 * IP_PACKET_TAG_STATUS command payload in H/W format.
 * This cmd is used to allow SW to track HW processing by setting a TAG
 * value that is passed back to SW inside Packet Status information.
 * TAG info will be provided as part of Packet Status info generated for
 * the next pkt transferred over the pipe.
 * This immediate command must be followed by a packet in the same transfer.
 * @sw_rsvd: Ignored by H/W. May be used by S/W
 * @tag: Tag that is provided back to SW
 */
struct ipa_imm_cmd_hw_ip_packet_tag_status {
u64 sw_rsvd:16;
u64 tag:48;
};
/*
 * struct ipa_imm_cmd_hw_dma_task_32b_addr -
 * IPA_DMA_TASK_32B_ADDR command payload in H/W format.
 * Used by clients using 32bit addresses. Used to perform DMA operation on
 * multiple descriptors.
 * The Opcode is dynamic, where it holds the number of buffer to process
 * @sw_rsvd: Ignored by H/W. May be used by S/W
 * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
 * DMA related data was completely xfered to its destination.
 * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
 * dest client. This is used for aggr sequence
 * @flsh: Flush flag: When asserted, pkt will go through the IPA blocks but
 * will not be xfered to dest client but rather will be discarded
 * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
 * from other EPs in the same src grp (RX queue)
 * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
 * servicing current EP out of the src EPs of the grp (RX queue)
 * @size1: Size of buffer1 data
 * @addr1: Pointer to buffer1 data
 * @packet_size: Total packet size. If a pkt send using multiple DMA_TASKs,
 * only the first one needs to have this field set. It will be ignored
 * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
 * must contain this field (2 or more buffers) or EOT.
 */
struct ipa_imm_cmd_hw_dma_task_32b_addr {
u64 sw_rsvd:11;
u64 cmplt:1;
u64 eof:1;
u64 flsh:1;
u64 lock:1;
u64 unlock:1;
u64 size1:16;
u64 addr1:32;
u64 packet_size:16;
};
#endif /* _IPAHAL_I_H_ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,434 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _IPAHAL_REG_H_
#define _IPAHAL_REG_H_
#include <linux/ipa.h>
/*
 * Registers names
 *
 * NOTE: Any change to this enum requires a matching change to the
 * ipareg_name_to_str array as well.
 */
enum ipahal_reg_name {
IPA_ROUTE,
IPA_IRQ_STTS_EE_n,
IPA_IRQ_EN_EE_n,
IPA_IRQ_CLR_EE_n,
IPA_IRQ_SUSPEND_INFO_EE_n,
IPA_SUSPEND_IRQ_EN_EE_n,
IPA_SUSPEND_IRQ_CLR_EE_n,
IPA_BCR,
IPA_ENABLED_PIPES,
IPA_COMP_SW_RESET,
IPA_VERSION,
IPA_TAG_TIMER,
IPA_COMP_HW_VERSION,
IPA_SPARE_REG_1,
IPA_SPARE_REG_2,
IPA_COMP_CFG,
IPA_STATE_AGGR_ACTIVE,
IPA_ENDP_INIT_HDR_n,
IPA_ENDP_INIT_HDR_EXT_n,
IPA_ENDP_INIT_AGGR_n,
IPA_AGGR_FORCE_CLOSE,
IPA_ENDP_INIT_ROUTE_n,
IPA_ENDP_INIT_MODE_n,
IPA_ENDP_INIT_NAT_n,
IPA_ENDP_INIT_CTRL_n,
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
IPA_ENDP_INIT_DEAGGR_n,
IPA_ENDP_INIT_SEQ_n,
IPA_DEBUG_CNT_REG_n,
IPA_ENDP_INIT_CFG_n,
IPA_IRQ_EE_UC_n,
IPA_ENDP_INIT_HDR_METADATA_MASK_n,
IPA_ENDP_INIT_HDR_METADATA_n,
IPA_ENABLE_GSI,
IPA_ENDP_INIT_RSRC_GRP_n,
IPA_SHARED_MEM_SIZE,
IPA_SRAM_DIRECT_ACCESS_n,
IPA_DEBUG_CNT_CTRL_n,
IPA_UC_MAILBOX_m_n,
IPA_FILT_ROUT_HASH_FLUSH,
IPA_SINGLE_NDP_MODE,
IPA_QCNCM,
IPA_SYS_PKT_PROC_CNTXT_BASE,
IPA_LOCAL_PKT_PROC_CNTXT_BASE,
IPA_ENDP_STATUS_n,
IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
IPA_REG_MAX,
};
/*
 * struct ipahal_reg_route - IPA route register
 * @route_dis: route disable
 * @route_def_pipe: route default pipe
 * @route_def_hdr_table: route default header table
 * @route_def_hdr_ofst: route default header offset table
 * @route_frag_def_pipe: Default pipe to route fragmented exception
 * packets and frag new rule statuses, if source pipe does not have
 * a notification status pipe defined.
 * @route_def_retain_hdr: default value of retain header. It is used
 * when no rule was hit
 */
struct ipahal_reg_route {
u32 route_dis;
u32 route_def_pipe;
u32 route_def_hdr_table;
u32 route_def_hdr_ofst;
u8 route_frag_def_pipe;
u32 route_def_retain_hdr;
};
/*
 * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
 * @route_table_index: Default index of routing table (IPA Consumer).
 */
struct ipahal_reg_endp_init_route {
u32 route_table_index;
};
/*
 * struct ipahal_reg_endp_init_rsrc_grp - PA_ENDP_INIT_RSRC_GRP_n register
 * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
 * index is for source-resource-group. If destination ENPD, index is
 * for destination-resource-group.
 */
struct ipahal_reg_endp_init_rsrc_grp {
u32 rsrc_grp;
};
/*
 * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
 * @dst_pipe_number: This parameter specifies destination output-pipe-packets
 * will be routed to. Valid for DMA mode only and for Input
 * Pipes only (IPA Consumer)
 * @ep_mode: endpoint mode configuration (struct ipa_ep_cfg_mode)
 */
struct ipahal_reg_endp_init_mode {
u32 dst_pipe_number;
struct ipa_ep_cfg_mode ep_mode;
};
/*
 * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
 * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
 * IPA shared memory.
 * @shared_mem_baddr: Offset of SW partition within IPA
 * shared memory[in 8Bytes]. To get absolute address of SW partition,
 * add this offset to IPA_SRAM_DIRECT_ACCESS_n baddr.
 */
struct ipahal_reg_shared_mem_size {
u32 shared_mem_sz;
u32 shared_mem_baddr;
};
/*
* struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
* @status_en: Determines if end point supports Status Indications. SW should
* set this bit in order to enable Statuses. Output Pipe - send
* Status indications only if bit is set. Input Pipe - forward Status
* indication to STATUS_ENDP only if bit is set. Valid for Input
* and Output Pipes (IPA Consumer and Producer)
* @status_ep: Statuses generated for this endpoint will be forwarded to the
* specified Status End Point. Status endpoint needs to be
* configured with STATUS_EN=1 Valid only for Input Pipes (IPA
* Consumer)
* @status_location: Location of PKT-STATUS on destination pipe.
* If set to 0 (default), PKT-STATUS will be appended before the packet
* for this endpoint. If set to 1, PKT-STATUS will be appended after the
* packet for this endpoint. Valid only for Output Pipes (IPA Producer)
*/
struct ipahal_reg_ep_cfg_status {
bool status_en; /* enable status indications on this endpoint */
u8 status_ep; /* endpoint that statuses are forwarded to (input pipes only) */
bool status_location; /* false: status appended before packet, true: after (output pipes) */
};
/*
* struct ipa_hash_tuple - Hash tuple members for flt and rt
* the fields tells if to be masked or not
* @src_id: pipe number for flt, table index for rt
* @src_ip_addr: IP source address
* @dst_ip_addr: IP destination address
* @src_port: L4 source port
* @dst_port: L4 destination port
* @protocol: IP protocol field
* @meta_data: packet meta-data
*
*/
struct ipahal_reg_hash_tuple {
/* src_id: pipe in flt, tbl index in rt */
bool src_id;
bool src_ip_addr; /* mask IP source address in hash */
bool dst_ip_addr; /* mask IP destination address in hash */
bool src_port; /* mask L4 source port in hash */
bool dst_port; /* mask L4 destination port in hash */
bool protocol; /* mask IP protocol field in hash */
bool meta_data; /* mask packet meta-data in hash */
};
/*
* struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
* @flt: Hash tuple info for filtering
* @rt: Hash tuple info for routing
* @undefinedX: Undefined/Unused bit fields set of the register
*/
struct ipahal_reg_fltrt_hash_tuple {
struct ipahal_reg_hash_tuple flt; /* hash tuple info for filtering */
struct ipahal_reg_hash_tuple rt; /* hash tuple info for routing */
u32 undefined1; /* undefined/unused register bits */
u32 undefined2; /* undefined/unused register bits */
};
/*
* enum ipahal_reg_dbg_cnt_type - Debug Counter Type
* DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
* DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
* DBG_CNT_TYPE_GENERAL - General counter
* DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
* DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
*/
enum ipahal_reg_dbg_cnt_type {
DBG_CNT_TYPE_IPV4_FLTR, /* count IPv4 filtering rules */
DBG_CNT_TYPE_IPV4_ROUT, /* count IPv4 routing rules */
DBG_CNT_TYPE_GENERAL, /* general counter */
DBG_CNT_TYPE_IPV6_FLTR, /* count IPv6 filtering rules */
DBG_CNT_TYPE_IPV6_ROUT, /* count IPv6 routing rules */
};
/*
* struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
* @en - Enable debug counter
* @type - Type of debugging counting
* @product - False->Count Bytes. True->Count #packets
* @src_pipe - Specific Pipe to match. If FF, no need to match
* specific pipe
* @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
* src_pipe
* @rule_idx - Rule index. Irrelevant for type General
*/
struct ipahal_reg_debug_cnt_ctrl {
bool en; /* enable the debug counter */
enum ipahal_reg_dbg_cnt_type type; /* what this counter counts */
bool product; /* false: count bytes, true: count packets */
u8 src_pipe; /* pipe to match; 0xFF means no specific-pipe match */
bool rule_idx_pipe_rule; /* pipe rule (per src_pipe) rather than global rule */
u8 rule_idx; /* rule index; irrelevant for type GENERAL */
};
/*
* struct ipahal_reg_rsrc_grp_cfg - Mix/Max values for two rsrc groups
* @x_min - first group min value
* @x_max - first group max value
* @y_min - second group min value
* @y_max - second group max value
*/
struct ipahal_reg_rsrc_grp_cfg {
u32 x_min; /* first group minimum value */
u32 x_max; /* first group maximum value */
u32 y_min; /* second group minimum value */
u32 y_max; /* second group maximum value */
};
/*
* struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
* @client_minmax - Min or Max values. In case of depth 0 the 4 values
* are used. In case of depth 1, only the first 2 values are used
*/
struct ipahal_reg_rx_hps_clients {
/* min or max per client; depth 0 uses all 4 entries, depth 1 only the first 2 */
u32 client_minmax[4];
};
/*
* struct ipahal_reg_valmask - holding values and masking for registers
* HAL application may require only value and mask of it for some
* register fields.
* @val - The value
* @mask - The mask of the value
*/
struct ipahal_reg_valmask {
u32 val; /* the value */
u32 mask; /* the mask of the value */
};
/*
* struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
* @v6_rt - Flush IPv6 Routing cache
* @v6_flt - Flush IPv6 Filtering cache
* @v4_rt - Flush IPv4 Routing cache
* @v4_flt - Flush IPv4 Filtering cache
*/
struct ipahal_reg_fltrt_hash_flush {
bool v6_rt; /* flush IPv6 routing cache */
bool v6_flt; /* flush IPv6 filtering cache */
bool v4_rt; /* flush IPv4 routing cache */
bool v4_flt; /* flush IPv4 filtering cache */
};
/*
* struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
* @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
* NDP-header.
* @undefined: undefined bits of the register
*/
struct ipahal_reg_single_ndp_mode {
bool single_ndp_en; /* build MBIM frames with at most one NDP header */
u32 undefined; /* undefined register bits */
};
/*
* struct ipahal_reg_qcncm - IPA QCNCM register
* @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
* @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
* the NDP header.
* @undefined: undefined bits of the register
*/
struct ipahal_reg_qcncm {
bool mode_en; /* use QCNCM signature when set */
u32 mode_val; /* SW signature placed in the NDP header (only when mode_en) */
u32 undefined; /* undefined register bits */
};
/*
* ipahal_reg_name_str() - returns string that represent the register
* @reg_name: [in] register name
*/
const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
/*
* ipahal_read_reg_n() - Get the raw value of n parameterized reg
*/
u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
/*
* ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
*/
void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
/*
 * ipahal_write_reg_n() - Write to n parameterized reg a raw value
 * @reg: register name
 * @n: register instance index (the "n" parameter)
 * @val: raw 32-bit value to write
 *
 * Thin wrapper over ipahal_write_reg_mn() with m == 0.
 */
static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
u32 n, u32 val)
{
ipahal_write_reg_mn(reg, 0, n, val);
}
/*
* ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
*/
u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
/*
* ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
*/
void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
const void *fields);
/*
 * ipahal_read_reg() - Get the raw value of a reg
 * @reg: register name
 *
 * Thin wrapper over ipahal_read_reg_n() with n == 0.
 * Return: raw 32-bit register value.
 */
static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
{
return ipahal_read_reg_n(reg, 0);
}
/*
 * ipahal_write_reg() - Write to reg a raw value
 * @reg: register name
 * @val: raw 32-bit value to write
 *
 * Thin wrapper over ipahal_write_reg_mn() with m == n == 0.
 */
static inline void ipahal_write_reg(enum ipahal_reg_name reg,
u32 val)
{
ipahal_write_reg_mn(reg, 0, 0, val);
}
/*
 * ipahal_read_reg_fields() - Get the parsed value of a reg
 * @reg: register name
 * @fields: [out] register-specific parsed-fields struct
 *
 * Thin wrapper over ipahal_read_reg_n_fields() with n == 0.
 * Return: value of ipahal_read_reg_n_fields() (presumably the raw
 * register value - confirm against its definition).
 */
static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
{
return ipahal_read_reg_n_fields(reg, 0, fields);
}
/*
 * ipahal_write_reg_fields() - Write to reg a parsed value
 * @reg: register name
 * @fields: register-specific parsed-fields struct to compose and write
 *
 * Thin wrapper over ipahal_write_reg_n_fields() with n == 0.
 */
static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
const void *fields)
{
ipahal_write_reg_n_fields(reg, 0, fields);
}
/*
* Get the offset of a m/n parameterized register
*/
u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
/*
 * Get the offset of a n parameterized register.
 * Thin wrapper over ipahal_get_reg_mn_ofst() with m == 0.
 */
static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
{
return ipahal_get_reg_mn_ofst(reg, 0, n);
}
/*
 * Get the offset of a register.
 * Thin wrapper over ipahal_get_reg_mn_ofst() with m == n == 0.
 */
static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
{
return ipahal_get_reg_mn_ofst(reg, 0, 0);
}
/*
* Get the register base address
*/
u32 ipahal_get_reg_base(void);
/*
* Specific functions
* These functions supply specific register values for specific operations
* that cannot be reached by generic functions.
* E.g. To disable aggregation, need to write to specific bits of the AGGR
* register. The other bits should be untouched. This operation is very specific
* and cannot be generically defined. For such operations we define these
* specific functions.
*/
void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
u32 ipahal_aggr_get_max_byte_limit(void);
u32 ipahal_aggr_get_max_pkt_limit(void);
void ipahal_get_aggr_force_close_valmask(int ep_idx,
struct ipahal_reg_valmask *valmask);
void ipahal_get_fltrt_hash_flush_valmask(
struct ipahal_reg_fltrt_hash_flush *flush,
struct ipahal_reg_valmask *valmask);
void ipahal_get_status_ep_valmask(int pipe_num,
struct ipahal_reg_valmask *valmask);
#endif /* _IPAHAL_REG_H_ */

View file

@ -0,0 +1,279 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _IPAHAL_REG_I_H_
#define _IPAHAL_REG_I_H_
int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
/*
 * Bit-field helpers for composing and parsing raw register values.
 * Every macro argument is fully parenthesized in the expansion so that
 * callers may pass arbitrary expressions without precedence surprises
 * (the original IPA_SETFIELD_IN_REG left @reg unparenthesized).
 */
/* Shift @val into its field position and mask off out-of-field bits */
#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
/* OR the shifted and masked @val into the register accumulator @reg */
#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
((reg) |= ((val) << (shift)) & (mask))
/* Extract the field described by @shift/@mask from the raw value @reg */
#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
(((reg) & (mask)) >> (shift))
/* IPA_ROUTE register */
#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
/* IPA_ENDP_INIT_HDR_n register */
#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
/* IPA_ENDP_INIT_HDR_EXT_n register */
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
/* IPA_ENDP_INIT_AGGR_N register */
#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
/* IPA_AGGR_FORCE_CLOSE register */
#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
/* IPA_ENDP_INIT_ROUTE_n register */
#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
/* IPA_ENDP_INIT_MODE_n register */
#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
/* IPA_ENDP_INIT_NAT_n register */
#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
/* IPA_ENDP_INIT_CTRL_n register */
#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
/* IPA_ENDP_INIT_DEAGGR_n register */
#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
/* IPA_IPA_ENDP_INIT_SEQ_n register */
#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
/* IPA_DEBUG_CNT_REG_n register */
#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
#define IPA_DEBUG_CNT_REG_N_MAX 15
#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
/* IPA_ENDP_INIT_CFG_n register */
#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
/* IPA_IPA_ENDP_INIT_HDR_METADATA_n register */
#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
/* IPA_ENDP_INIT_RSRC_GRP_n register */
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
/* IPA_SHARED_MEM_SIZE register */
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff
#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
/* IPA_DEBUG_CNT_CTRL_n register */
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
/* IPA_FILT_ROUT_HASH_FLUSH register */
#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
/* IPA_SINGLE_NDP_MODE register */
#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
/* IPA_QCNCM register */
#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
#define IPA_QCNCM_MODE_VAL_SHFT 0x4
#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
#define IPA_QCNCM_MODE_EN_BMSK 0x1
#define IPA_QCNCM_MODE_EN_SHFT 0
/* IPA_ENDP_STATUS_n register */
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
#endif /* _IPAHAL_REG_I_H_ */

File diff suppressed because it is too large Load diff

View file

@ -98,8 +98,6 @@ static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
*/
int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
{
int res = 0;
TETH_DBG_FUNC_ENTRY();
if (!params) {
@ -112,28 +110,8 @@ int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
params->private_data = NULL;
params->skip_ep_cfg = true;
/* Build dependency graph */
res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0 && res != -EINPROGRESS) {
TETH_ERR("ipa3_rm_add_dependency() failed.\n");
goto bail;
}
res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
if (res < 0 && res != -EINPROGRESS) {
ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
TETH_ERR("ipa3_rm_add_dependency() failed.\n");
goto bail;
}
res = 0;
goto bail;
bail:
TETH_DBG_FUNC_EXIT();
return res;
return 0;
}
/**
@ -162,7 +140,40 @@ int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
*/
int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
{
return 0;
int res = 0;
TETH_DBG_FUNC_ENTRY();
/* Build the dependency graph, first add_dependency call is sync
* in order to make sure the IPA clocks are up before we continue
* and notify the USB driver it may continue.
*/
res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0) {
TETH_ERR("ipa3_rm_add_dependency() failed.\n");
goto bail;
}
/* this add_dependency call can't be sync since it will block until USB
* status is connected (which can happen only after the tethering
* bridge is connected), the clocks are already up so the call doesn't
* need to block.
*/
res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
if (res < 0 && res != -EINPROGRESS) {
ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
TETH_ERR("ipa3_rm_add_dependency() failed.\n");
goto bail;
}
res = 0;
bail:
TETH_DBG_FUNC_EXIT();
return res;
}
static long ipa3_teth_bridge_ioctl(struct file *filp,

View file

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -80,8 +80,8 @@ enum ipa_aggr_type {
* enum ipa_aggr_mode - global aggregation mode
*/
enum ipa_aggr_mode {
IPA_MBIM,
IPA_QCNCM,
IPA_MBIM_AGGR,
IPA_QCNCM_AGGR,
};
/**
@ -239,6 +239,13 @@ struct ipa_ep_cfg_mode {
* aggregation closure. Valid for Output Pipes only (IPA
* Producer). EOF affects only Pipes configured for
* generic aggregation.
* @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this
* pipe will apply a hard-limit behavior which will not
* allow frames to be closed with more than byte-limit
* bytes. If set to 0, previous byte-limit behavior
* will apply - frames close once a packet causes the
* accumulated byte-count to cross the byte-limit
* threshold (closed frame will contain that packet).
*/
struct ipa_ep_cfg_aggr {
enum ipa_aggr_en_type aggr_en;
@ -246,6 +253,7 @@ struct ipa_ep_cfg_aggr {
u32 aggr_byte_limit;
u32 aggr_time_limit;
u32 aggr_pkt_limit;
u32 aggr_hard_byte_limit_en;
};
/**
@ -323,11 +331,16 @@ enum ipa_cs_offload {
* checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
* mean 0 - 60 byte checksum header offset. Valid for input
* pipes only (IPA consumer)
* @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
* separate DDR & PCIe transactions in-order to limit them as
* a group (using MAX_WRITES/READS limiation). Valid for input and
* output pipes (IPA consumer+producer)
*/
struct ipa_ep_cfg_cfg {
bool frag_offload_en;
enum ipa_cs_offload cs_offload_en;
u8 cs_metadata_hdr_offset;
u8 gen_qmb_master_sel;
};
/**
@ -1130,151 +1143,6 @@ struct ipa_gsi_ep_config {
int ee;
};
enum ipa_usb_teth_prot {
IPA_USB_RNDIS = 0,
IPA_USB_ECM = 1,
IPA_USB_RMNET = 2,
IPA_USB_MBIM = 3,
IPA_USB_DIAG = 4,
IPA_USB_MAX_TETH_PROT_SIZE
};
/**
* ipa_usb_teth_params - parameters for RDNIS/ECM initialization API
*
* @host_ethaddr: host Ethernet address in network order
* @device_ethaddr: device Ethernet address in network order
*/
struct ipa_usb_teth_params {
u8 host_ethaddr[ETH_ALEN];
u8 device_ethaddr[ETH_ALEN];
};
enum ipa_usb_notify_event {
IPA_USB_DEVICE_READY,
IPA_USB_REMOTE_WAKEUP,
IPA_USB_SUSPEND_COMPLETED
};
enum ipa_usb_max_usb_packet_size {
IPA_USB_HIGH_SPEED_512B = 512,
IPA_USB_SUPER_SPEED_1024B = 1024
};
/**
* ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
* channel scratch
*
* @last_trb_addr: Address (LSB - based on alignment restrictions) of
* last TRB in queue. Used to identify roll over case
* @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
* configuration). Must be aligned to max USB Packet Size.
* Should be 1 <= const_buffer_size <= 31.
* @depcmd_low_addr: Used to generate "Update Transfer" command
* @depcmd_hi_addr: Used to generate "Update Transfer" command.
*/
struct ipa_usb_xdci_chan_scratch {
u16 last_trb_addr;
u8 const_buffer_size;
u32 depcmd_low_addr;
u8 depcmd_hi_addr;
};
/**
* ipa_usb_xdci_chan_params - xDCI channel related properties
*
* @client: type of "client"
* @ipa_ep_cfg: IPA EP configuration
* @keep_ipa_awake: when true, IPA will not be clock gated
* @teth_prot: tethering protocol for which the channel is created
* @gevntcount_low_addr: GEVNCOUNT low address for event scratch
* @gevntcount_hi_addr: GEVNCOUNT high address for event scratch
* @dir: channel direction
* @xfer_ring_len: length of transfer ring in bytes (must be integral
* multiple of transfer element size - 16B for xDCI)
* @xfer_ring_base_addr: physical base address of transfer ring. Address must be
* aligned to xfer_ring_len rounded to power of two
* @xfer_scratch: parameters for xDCI channel scratch
*
*/
struct ipa_usb_xdci_chan_params {
/* IPA EP params */
enum ipa_client_type client;
struct ipa_ep_cfg ipa_ep_cfg;
bool keep_ipa_awake;
enum ipa_usb_teth_prot teth_prot;
/* event ring params */
u32 gevntcount_low_addr;
u8 gevntcount_hi_addr;
/* transfer ring params */
enum gsi_chan_dir dir;
u16 xfer_ring_len;
u64 xfer_ring_base_addr;
struct ipa_usb_xdci_chan_scratch xfer_scratch;
};
/**
* ipa_usb_chan_out_params - out parameters for channel request
*
* @clnt_hdl: opaque client handle assigned by IPA to client
* @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
* LSBs of the doorbell value should be written
* @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
* MSBs of the doorbell value should be written
*
*/
struct ipa_req_chan_out_params {
u32 clnt_hdl;
u32 db_reg_phs_addr_lsb;
u32 db_reg_phs_addr_msb;
};
/**
* ipa_usb_teth_prot_params - parameters for connecting RNDIS
*
* @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
* @max_packet_number_to_dev: max number of UL aggregated packets
* @max_xfer_size_bytes_to_host: max size of DL packets in bytes
*
*/
struct ipa_usb_teth_prot_params {
u32 max_xfer_size_bytes_to_dev;
u32 max_packet_number_to_dev;
u32 max_xfer_size_bytes_to_host;
};
/**
* ipa_usb_xdci_connect_params - parameters required to start IN, OUT
* channels, and connect RNDIS/ECM/teth_bridge
*
* @max_pkt_size: high speed or full speed
* @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
* The hardware-assigned transfer resource index for the
* transfer, which was returned in response to the
* Start Transfer command. This field is used for
* "Update Transfer" command.
* Should be 0 =< ipa_to_usb_xferrscidx <= 127.
* @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
* channel
* @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
* Should be 0 =< usb_to_ipa_xferrscidx <= 127.
* @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
* channel
* @teth_prot: tethering protocol
* @teth_prot_params: parameters for connecting the tethering protocol.
* @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
*/
struct ipa_usb_xdci_connect_params {
enum ipa_usb_max_usb_packet_size max_pkt_size;
u8 ipa_to_usb_xferrscidx;
bool ipa_to_usb_xferrscidx_valid;
u8 usb_to_ipa_xferrscidx;
bool usb_to_ipa_xferrscidx_valid;
enum ipa_usb_teth_prot teth_prot;
struct ipa_usb_teth_prot_params teth_prot_params;
u32 max_supported_bandwidth_mbps;
};
#if defined CONFIG_IPA || defined CONFIG_IPA3
/*
@ -1465,6 +1333,10 @@ int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
/*
* To de-register uC ready callback
*/
int ipa_uc_dereg_rdyCB(void);
int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
@ -1579,117 +1451,6 @@ int ipa_mhi_resume(void);
void ipa_mhi_destroy(void);
/*
* IPA_USB
*/
/**
* ipa_usb_init_teth_prot - Peripheral should call this function to initialize
* RNDIS/ECM/teth_bridge, prior to calling ipa_usb_xdci_connect()
*
* @usb_teth_type: tethering protocol type
* @teth_params: pointer to tethering protocol parameters.
* Should be struct ipa_usb_teth_params for RNDIS/ECM,
* or NULL for teth_bridge
* @ipa_usb_notify_cb: will be called to notify USB driver on certain events
* @user_data: cookie used for ipa_usb_notify_cb
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data);
/**
* ipa_usb_xdci_connect - Peripheral should call this function to start IN &
* OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
* For DIAG, only starts IN channel.
*
* @ul_chan_params: parameters for allocating UL xDCI channel. containing
* required info on event and transfer rings, and IPA EP
* configuration
* @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
* registers physical address for UL channel
* @dl_chan_params: parameters for allocating DL xDCI channel. containing
* required info on event and transfer rings, and IPA EP
* configuration
* @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
* registers physical address for DL channel
* @connect_params: handles and scratch params of the required channels,
* tethering protocol and the tethering protocol parameters.
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params);
/**
* ipa_usb_xdci_disconnect - Peripheral should call this function to stop
* IN & OUT xDCI channels
* For DIAG, only stops IN channel.
*
* @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for OUT channel
* @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for IN channel
* @teth_prot: tethering protocol
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
* RNDIS/ECM/MBIM/RMNET
*
* @teth_prot: tethering protocol
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_xdci_suspend - Peripheral should call this function to suspend
* IN & OUT xDCI channels
*
* @ul_clnt_hdl: client handle previously obtained from
* ipa_usb_xdci_connect() for OUT channel
* @dl_clnt_hdl: client handle previously obtained from
* ipa_usb_xdci_connect() for IN channel
* @teth_prot: tethering protocol
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_xdci_resume - Peripheral should call this function to resume
* IN & OUT xDCI channels
*
* @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for OUT channel
* @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for IN channel
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
/*
* mux id
*/
@ -2166,6 +1927,11 @@ static inline int ipa_uc_reg_rdyCB(
return -EPERM;
}
static inline int ipa_uc_dereg_rdyCB(void)
{
return -EPERM;
}
/*
* Resource manager
@ -2413,51 +2179,6 @@ static inline void ipa_mhi_destroy(void)
return;
}
/*
* IPA_USB
*/
static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data)
{
return -EPERM;
}
static inline int ipa_usb_xdci_connect(
struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params)
{
return -EPERM;
}
static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
{
return -EPERM;
}
/*
* mux id
*/

--- New file: include/linux/ipa_usb.h (321 lines added, hunk @ -0,0 +1,321 @) ---
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _IPA_USB_H_
#define _IPA_USB_H_
/**
 * enum ipa_usb_teth_prot - tethering protocols carried over the xDCI channels
 *
 * IPA_USB_MAX_TETH_PROT_SIZE is a sentinel count, not a valid protocol value.
 */
enum ipa_usb_teth_prot {
IPA_USB_RNDIS = 0,
IPA_USB_ECM = 1,
IPA_USB_RMNET = 2,
IPA_USB_MBIM = 3,
IPA_USB_DIAG = 4,
IPA_USB_MAX_TETH_PROT_SIZE
};
/**
 * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
 *
 * @host_ethaddr: host Ethernet address in network order
 * @device_ethaddr: device Ethernet address in network order
 *
 * NOTE(review): ETH_ALEN is defined in <linux/if_ether.h>; confirm every
 * includer of this header pulls that in.
 */
struct ipa_usb_teth_params {
u8 host_ethaddr[ETH_ALEN];
u8 device_ethaddr[ETH_ALEN];
};
/**
 * enum ipa_usb_notify_event - events delivered to the peripheral through the
 * ipa_usb_notify_cb callback registered via ipa_usb_init_teth_prot()
 */
enum ipa_usb_notify_event {
IPA_USB_DEVICE_READY,
IPA_USB_REMOTE_WAKEUP,
IPA_USB_SUSPEND_COMPLETED
};
/**
 * enum ipa_usb_max_usb_packet_size - maximum USB packet size in bytes,
 * selected by link speed: 512B for high speed, 1024B for super speed
 */
enum ipa_usb_max_usb_packet_size {
IPA_USB_HIGH_SPEED_512B = 512,
IPA_USB_SUPER_SPEED_1024B = 1024
};
/**
 * ipa_usb_teth_prot_params - parameters for connecting RNDIS
 *
 * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
 * @max_packet_number_to_dev: max number of UL aggregated packets
 * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
 *
 * Embedded in struct ipa_usb_xdci_connect_params (teth_prot_params field).
 */
struct ipa_usb_teth_prot_params {
u32 max_xfer_size_bytes_to_dev;
u32 max_packet_number_to_dev;
u32 max_xfer_size_bytes_to_host;
};
/**
 * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
 * channels, and connect RNDIS/ECM/teth_bridge
 *
 * @max_pkt_size: high speed or super speed (the enum values are
 *	512B / 1024B; the original comment said "full speed", which does
 *	not match enum ipa_usb_max_usb_packet_size)
 * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
 *	The hardware-assigned transfer resource index for the
 *	transfer, which was returned in response to the
 *	Start Transfer command. This field is used for
 *	"Update Transfer" command.
 *	Should be 0 <= ipa_to_usb_xferrscidx <= 127.
 * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
 *	channel
 * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
 *	Should be 0 <= usb_to_ipa_xferrscidx <= 127.
 * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
 *	channel
 * @teth_prot: tethering protocol
 * @teth_prot_params: parameters for connecting the tethering protocol.
 * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
 */
struct ipa_usb_xdci_connect_params {
enum ipa_usb_max_usb_packet_size max_pkt_size;
u8 ipa_to_usb_xferrscidx;
bool ipa_to_usb_xferrscidx_valid;
u8 usb_to_ipa_xferrscidx;
bool usb_to_ipa_xferrscidx_valid;
enum ipa_usb_teth_prot teth_prot;
struct ipa_usb_teth_prot_params teth_prot_params;
u32 max_supported_bandwidth_mbps;
};
/**
 * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
 * channel scratch
 *
 * @last_trb_addr: Address (LSB - based on alignment restrictions) of
 *	last TRB in queue. Used to identify roll over case
 * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
 *	configuration). Must be aligned to max USB Packet Size.
 *	Should be 1 <= const_buffer_size <= 31.
 * @depcmd_low_addr: Used to generate "Update Transfer" command
 * @depcmd_hi_addr: Used to generate "Update Transfer" command.
 *	NOTE(review): declared u8 while depcmd_low_addr is u32, i.e. only
 *	8 high address bits are carried — confirm this matches the
 *	channel-scratch layout expected by the GSI firmware.
 */
struct ipa_usb_xdci_chan_scratch {
u16 last_trb_addr;
u8 const_buffer_size;
u32 depcmd_low_addr;
u8 depcmd_hi_addr;
};
/**
 * ipa_usb_xdci_chan_params - xDCI channel related properties
 *
 * @client: type of "client"
 * @ipa_ep_cfg: IPA EP configuration
 * @keep_ipa_awake: when true, IPA will not be clock gated
 * @teth_prot: tethering protocol for which the channel is created
 * @gevntcount_low_addr: GEVNCOUNT low address for event scratch
 * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch
 *	(u8 — only the 8 high address bits are carried)
 * @dir: channel direction
 * @xfer_ring_len: length of transfer ring in bytes (must be integral
 *	multiple of transfer element size - 16B for xDCI)
 * @xfer_ring_base_addr: physical base address of transfer ring. Address must
 *	be aligned to xfer_ring_len rounded to power of two
 * @xfer_scratch: parameters for xDCI channel scratch
 *
 */
struct ipa_usb_xdci_chan_params {
/* IPA EP params */
enum ipa_client_type client;
struct ipa_ep_cfg ipa_ep_cfg;
bool keep_ipa_awake;
enum ipa_usb_teth_prot teth_prot;
/* event ring params */
u32 gevntcount_low_addr;
u8 gevntcount_hi_addr;
/* transfer ring params */
enum gsi_chan_dir dir;
u16 xfer_ring_len;
u64 xfer_ring_base_addr;
struct ipa_usb_xdci_chan_scratch xfer_scratch;
};
/**
 * ipa_req_chan_out_params - out parameters for channel request
 * (kernel-doc name corrected to match the struct tag; it previously read
 * "ipa_usb_chan_out_params")
 *
 * @clnt_hdl: opaque client handle assigned by IPA to client
 * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
 *	LSBs of the doorbell value should be written
 * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
 *	MSBs of the doorbell value should be written
 *
 */
struct ipa_req_chan_out_params {
u32 clnt_hdl;
u32 db_reg_phs_addr_lsb;
u32 db_reg_phs_addr_msb;
};
#ifdef CONFIG_IPA3
/**
* ipa_usb_init_teth_prot - Peripheral should call this function to initialize
* RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect()
*
* @teth_prot: tethering protocol type
* @teth_params: pointer to tethering protocol parameters.
* Should be struct ipa_usb_teth_params for RNDIS/ECM,
* or NULL for teth_bridge
* @ipa_usb_notify_cb: will be called to notify USB driver on certain events
* @user_data: cookie used for ipa_usb_notify_cb
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data);
/**
* ipa_usb_xdci_connect - Peripheral should call this function to start IN &
* OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
* For DPL, only starts IN channel.
*
* @ul_chan_params: parameters for allocating UL xDCI channel. containing
* required info on event and transfer rings, and IPA EP
* configuration
* @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
* registers physical address for UL channel
* @dl_chan_params: parameters for allocating DL xDCI channel. containing
* required info on event and transfer rings, and IPA EP
* configuration
* @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
* registers physical address for DL channel
* @connect_params: handles and scratch params of the required channels,
* tethering protocol and the tethering protocol parameters.
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params);
/**
* ipa_usb_xdci_disconnect - Peripheral should call this function to stop
* IN & OUT xDCI channels
* For DPL, only stops IN channel.
*
* @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for OUT channel
* @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for IN channel
* @teth_prot: tethering protocol
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
* RNDIS/ECM/MBIM/RMNET
*
* @teth_prot: tethering protocol
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_xdci_suspend - Peripheral should call this function to suspend
* IN & OUT or DPL xDCI channels
*
* @ul_clnt_hdl: client handle previously obtained from
* ipa_usb_xdci_connect() for OUT channel
* @dl_clnt_hdl: client handle previously obtained from
* ipa_usb_xdci_connect() for IN channel
* @teth_prot: tethering protocol
*
* Note: Should not be called from atomic context
* Note: for DPL, the ul will be ignored as irrelevant
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
/**
* ipa_usb_xdci_resume - Peripheral should call this function to resume
* IN & OUT or DPL xDCI channels
*
* @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for OUT channel
* @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
* for IN channel
* @teth_prot: tethering protocol
*
* Note: Should not be called from atomic context
* Note: for DPL, the ul will be ignored as irrelevant
*
* @Return 0 on success, negative on failure
*/
int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot);
#else /* CONFIG_IPA3 */
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data)
{
return -EPERM;
}
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_xdci_connect(
struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
struct ipa_usb_xdci_connect_params *connect_params)
{
return -EPERM;
}
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
/* Stub used when CONFIG_IPA3 is disabled: always fails with -EPERM. */
static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
return -EPERM;
}
#endif /* CONFIG_IPA3 */
#endif /* _IPA_USB_H_ */

View file

@ -1621,7 +1621,7 @@ static inline int sps_bam_process_irq(unsigned long dev)
}
static inline int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
u32 *size);
u32 *size)
{
return -EPERM;
}

View file

@ -73,12 +73,12 @@ void rndis_ipa_cleanup(void *private);
#else /* CONFIG_RNDIS_IPA*/
int rndis_ipa_init(struct ipa_usb_init_params *params)
static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
{
return 0;
}
int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
u32 ipa_to_usb_hdl,
u32 max_xfer_size_bytes_to_dev,
u32 max_packet_number_to_dev,

View file

@ -395,13 +395,14 @@ enum ipa_tethering_stats_event {
/**
* enum ipa_rm_resource_name - IPA RM clients identification names
*
* Add new mapping to ipa_rm_dep_prod_index() / ipa_rm_dep_cons_index()
* Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
* when adding new entry to this enum.
*/
enum ipa_rm_resource_name {
IPA_RM_RESOURCE_PROD = 0,
IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_HSIC_PROD,
IPA_RM_RESOURCE_STD_ECM_PROD,
IPA_RM_RESOURCE_RNDIS_PROD,
@ -413,6 +414,7 @@ enum ipa_rm_resource_name {
IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
IPA_RM_RESOURCE_USB_CONS,
IPA_RM_RESOURCE_USB_DPL_CONS,
IPA_RM_RESOURCE_HSIC_CONS,
IPA_RM_RESOURCE_WLAN_CONS,
IPA_RM_RESOURCE_APPS_CONS,