net: rmnet_data: adding new trace points

Added new trace points for flow control events and
aggregation/deaggregation.

CRs-Fixed: 661459
Change-Id: I22e5b441f5bb8ff055b0577954cc9f6285b68a74
Acked-by: Sivan Reinstein <sivanr@qti.qualcomm.com>
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
Author: Harout Hedeshian, 2014-05-07 09:10:49 +03:00 (committed by David Keitel)
parent c183cadfd9
commit e9f3f57cb4
4 changed files with 151 additions and 6 deletions
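
The new events are standard ftrace trace points, so they can be enabled and read at runtime through tracefs. Below is a minimal userspace sketch, not part of this commit, assuming tracefs is reachable at /sys/kernel/debug/tracing and that the trace system defined by rmnet_data_trace.h is named rmnet_data; adjust the paths for your kernel if either assumption does not hold.

/*
 * Sketch: enable all rmnet_data trace events and stream the output.
 * The "rmnet_data" system name and the tracefs mount point are assumptions.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* Enable every event in the rmnet_data trace system */
	if (write_str("/sys/kernel/debug/tracing/events/rmnet_data/enable", "1")) {
		perror("enable rmnet_data events");
		return 1;
	}

	/* Stream events as they fire, e.g. rmnet_fc_qmi, rmnet_map_aggregate */
	pipe = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("open trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}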

@@ -309,10 +309,12 @@ static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
int rc, co = 0;
if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
trace_rmnet_start_deaggregation(skb);
while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
_rmnet_map_ingress_handler(skbn, config);
co++;
}
trace_rmnet_end_deaggregation(skb, co);
LOGD("De-aggregated %d packets", co);
rmnet_stats_deagg_pkts(co);
rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);

@@ -71,6 +71,135 @@ DEFINE_EVENT(rmnet_handler_template, __rmnet_deliver_skb,
TP_ARGS(skb)
);
DECLARE_EVENT_CLASS(rmnet_tc_fc_template,
TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
TP_ARGS(tcm_handle, qdisc_len, is_enable),
TP_STRUCT__entry(
__field(u32, handle)
__field(int, qlen)
__field(int, enable)
),
TP_fast_assign(
__entry->handle = tcm_handle;
__entry->qlen = qdisc_len;
__entry->enable = is_enable;
),
TP_printk("tcm_handle=%d qdisc length=%d flow %s",
__entry->handle, __entry->qlen,
__entry->enable ? "enable" : "disable")
)
DEFINE_EVENT(rmnet_tc_fc_template, rmnet_fc_qmi,
TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
TP_ARGS(tcm_handle, qdisc_len, is_enable)
);
DEFINE_EVENT(rmnet_tc_fc_template, rmnet_fc_map,
TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
TP_ARGS(tcm_handle, qdisc_len, is_enable)
);
DECLARE_EVENT_CLASS(rmnet_aggregation_template,
TP_PROTO(struct sk_buff *skb, int num_agg_packets),
TP_ARGS(skb, num_agg_packets),
TP_STRUCT__entry(
__field(void *, skbaddr)
__field(unsigned int, len)
__string(name, skb->dev->name)
__field(int, num)
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb->len;
__assign_str(name, skb->dev->name);
__entry->num = num_agg_packets;
),
TP_printk("dev=%s skbaddr=%p len=%u agg_count: %d",
__get_str(name), __entry->skbaddr, __entry->len, __entry->num)
)
DEFINE_EVENT(rmnet_aggregation_template, rmnet_map_aggregate,
TP_PROTO(struct sk_buff *skb, int num_agg_packets),
TP_ARGS(skb, num_agg_packets)
);
DEFINE_EVENT(rmnet_aggregation_template, rmnet_map_flush_packet_queue,
TP_PROTO(struct sk_buff *skb, int num_agg_packets),
TP_ARGS(skb, num_agg_packets)
);
TRACE_EVENT(rmnet_start_aggregation,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__string(name, skb->dev->name)
),
TP_fast_assign(
__assign_str(name, skb->dev->name);
),
TP_printk("dev: %s, aggregated first packet", __get_str(name))
)
TRACE_EVENT(rmnet_start_deaggregation,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__string(name, skb->dev->name)
),
TP_fast_assign(
__assign_str(name, skb->dev->name);
),
TP_printk("dev: %s, deaggregated first packet", __get_str(name))
)
TRACE_EVENT(rmnet_end_deaggregation,
TP_PROTO(struct sk_buff *skb, int num_deagg_packets),
TP_ARGS(skb, num_deagg_packets),
TP_STRUCT__entry(
__string(name, skb->dev->name)
__field(int, num)
),
TP_fast_assign(
__assign_str(name, skb->dev->name);
__entry->num = num_deagg_packets;
),
TP_printk("dev: %s, deaggregate end count: %d",
__get_str(name), __entry->num)
)
#endif /* _RMNET_DATA_TRACE_H_ */
/* This part must be outside protection */

@@ -214,7 +214,7 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
int cmd)
{
struct rmnet_vnd_private_s *dev_conf;
int rc;
int rc, qdisc_len = 0;
struct rmnet_ioctl_data_s ioctl_data;
rc = 0;
dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
@@ -248,7 +248,9 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
rc = -EFAULT;
break;
}
tc_qdisc_flow_control(dev, ioctl_data.u.tcm_handle, 1);
qdisc_len = tc_qdisc_flow_control(dev,
ioctl_data.u.tcm_handle, 1);
trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
break;
case RMNET_IOCTL_FLOW_DISABLE:
@@ -258,7 +260,9 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
rc = -EFAULT;
break;
}
tc_qdisc_flow_control(dev, ioctl_data.u.tcm_handle, 0);
qdisc_len = tc_qdisc_flow_control(dev,
ioctl_data.u.tcm_handle, 0);
trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
break;
default:
@@ -278,10 +282,13 @@ struct rmnet_vnd_fc_work {
static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
{
struct rmnet_vnd_fc_work *fcwork;
int qdisc_len = 0;
fcwork = (struct rmnet_vnd_fc_work *)work;
rtnl_lock();
tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle, fcwork->enable);
qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
fcwork->enable);
trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
rtnl_unlock();
LOGL("[%s] handle:%08X enable:%d",

@@ -25,6 +25,7 @@
#include "rmnet_map.h"
#include "rmnet_data_private.h"
#include "rmnet_data_stats.h"
#include "rmnet_data_trace.h"
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);
@@ -151,7 +152,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
struct rmnet_phys_ep_conf_s *config;
unsigned long flags;
struct sk_buff *skb;
int rc;
int rc, agg_count = 0;
skb = 0;
real_work = (struct agg_work *)work;
@@ -165,6 +166,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
if (config->agg_count > 1)
LOGL("Agg count: %d", config->agg_count);
skb = config->agg_skb;
agg_count = config->agg_count;
config->agg_skb = 0;
}
config->agg_state = RMNET_MAP_AGG_IDLE;
@@ -176,6 +178,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
spin_unlock_irqrestore(&config->agg_lock, flags);
if (skb) {
trace_rmnet_map_flush_packet_queue(skb, agg_count);
rc = dev_queue_xmit(skb);
rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
}
@@ -197,7 +200,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
struct agg_work *work;
unsigned long flags;
struct sk_buff *agg_skb;
int size, rc;
int size, rc, agg_count = 0;
if (!skb || !config)
@@ -218,12 +221,14 @@ new_packet:
config->agg_count = 0;
spin_unlock_irqrestore(&config->agg_lock, flags);
rmnet_stats_agg_pkts(1);
trace_rmnet_map_aggregate(skb, 0);
rc = dev_queue_xmit(skb);
rmnet_stats_queue_xmit(rc,
RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
return;
}
config->agg_count = 1;
trace_rmnet_start_aggregation(skb);
rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
goto schedule;
}
@@ -233,9 +238,11 @@ new_packet:
if (config->agg_count > 1)
LOGL("Agg count: %d", config->agg_count);
agg_skb = config->agg_skb;
agg_count = config->agg_count;
config->agg_skb = 0;
config->agg_count = 0;
spin_unlock_irqrestore(&config->agg_lock, flags);
trace_rmnet_map_aggregate(skb, agg_count);
rc = dev_queue_xmit(agg_skb);
rmnet_stats_queue_xmit(rc,
RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
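
The flow-control events defined above carry the qdisc handle, the qdisc length returned by tc_qdisc_flow_control(), and the enable/disable flag, so they can be narrowed down with the standard ftrace per-event filters. A short sketch follows, again under the assumption that the trace system is named rmnet_data and tracefs lives at /sys/kernel/debug/tracing; the handle value is only an illustration.

/*
 * Sketch: trace only flow-disable transitions for one qdisc handle.
 * Field names (handle, enable) come from rmnet_tc_fc_template above;
 * the system name, tracefs path, and the 0x10001 handle are assumptions.
 */
#include <stdio.h>

static void echo_to(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	const char *base = "/sys/kernel/debug/tracing/events/rmnet_data";
	char path[256];

	/* Match only disable events on one handle (example value) */
	snprintf(path, sizeof(path), "%s/rmnet_fc_qmi/filter", base);
	echo_to(path, "handle == 0x10001 && enable == 0");

	/* Enable just the two flow-control events */
	snprintf(path, sizeof(path), "%s/rmnet_fc_qmi/enable", base);
	echo_to(path, "1");
	snprintf(path, sizeof(path), "%s/rmnet_fc_map/enable", base);
	echo_to(path, "1");

	return 0;
}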