Merge "defconfig: Enable preemptirq tracing for msm8998 and sdm660"

This commit is contained in:
Linux Build Service Account 2018-05-03 02:07:28 -07:00 committed by Gerrit - the friendly Code Review server
commit 58f82482c4
8 changed files with 162 additions and 1 deletions

View file

@ -696,6 +696,7 @@ CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y

View file

@ -697,6 +697,7 @@ CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y

View file

@ -85,6 +85,11 @@ extern unsigned int sysctl_sched_short_sleep;
#endif /* CONFIG_SCHED_HMP */

#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
extern unsigned int sysctl_preemptoff_tracing_threshold_ns;
extern unsigned int sysctl_irqsoff_tracing_threshold_ns;
#endif

enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,

View file

@ -52,6 +52,34 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
TP_ARGS(ip, parent_ip));
#endif
TRACE_EVENT(irqs_disable,
TP_PROTO(u64 delta, unsigned long caddr0, unsigned long caddr1,
unsigned long caddr2, unsigned long caddr3),
TP_ARGS(delta, caddr0, caddr1, caddr2, caddr3),
TP_STRUCT__entry(
__field(u64, delta)
__field(void*, caddr0)
__field(void*, caddr1)
__field(void*, caddr2)
__field(void*, caddr3)
),
TP_fast_assign(
__entry->delta = delta;
__entry->caddr0 = (void *)caddr0;
__entry->caddr1 = (void *)caddr1;
__entry->caddr2 = (void *)caddr2;
__entry->caddr3 = (void *)caddr3;
),
TP_printk("delta=%llu(ns) Callers:(%pf<-%pf<-%pf<-%pf)", __entry->delta,
__entry->caddr0, __entry->caddr1,
__entry->caddr2, __entry->caddr3)
);
#endif /* _TRACE_PREEMPTIRQ_H */
#include <trace/define_trace.h>

View file

@ -1436,6 +1436,38 @@ TRACE_EVENT(sched_isolate,
__entry->time, __entry->isolate)
);
TRACE_EVENT(sched_preempt_disable,
TP_PROTO(u64 delta, bool irqs_disabled,
unsigned long caddr0, unsigned long caddr1,
unsigned long caddr2, unsigned long caddr3),
TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3),
TP_STRUCT__entry(
__field(u64, delta)
__field(bool, irqs_disabled)
__field(void*, caddr0)
__field(void*, caddr1)
__field(void*, caddr2)
__field(void*, caddr3)
),
TP_fast_assign(
__entry->delta = delta;
__entry->irqs_disabled = irqs_disabled;
__entry->caddr0 = (void *)caddr0;
__entry->caddr1 = (void *)caddr1;
__entry->caddr2 = (void *)caddr2;
__entry->caddr3 = (void *)caddr3;
),
TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%pf<-%pf<-%pf<-%pf)",
__entry->delta, __entry->irqs_disabled,
__entry->caddr0, __entry->caddr1,
__entry->caddr2, __entry->caddr3)
);
TRACE_EVENT(sched_contrib_scale_f,
TP_PROTO(int cpu, unsigned long freq_scale_factor,

View file

@ -3268,9 +3268,24 @@ notrace unsigned long get_parent_ip(unsigned long addr)
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
/*
* preemptoff stack tracing threshold in ns.
* default: 1ms
*/
unsigned int sysctl_preemptoff_tracing_threshold_ns = 1000000UL;
struct preempt_store {
u64 ts;
unsigned long caddr[4];
bool irqs_disabled;
};
static DEFINE_PER_CPU(struct preempt_store, the_ps);
void preempt_count_add(int val)
{
struct preempt_store *ps = &per_cpu(the_ps, raw_smp_processor_id());

#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
@ -3291,6 +3306,13 @@ void preempt_count_add(int val)
#ifdef CONFIG_DEBUG_PREEMPT
current->preempt_disable_ip = ip;
#endif
ps->ts = sched_clock();
ps->caddr[0] = CALLER_ADDR0;
ps->caddr[1] = CALLER_ADDR1;
ps->caddr[2] = CALLER_ADDR2;
ps->caddr[3] = CALLER_ADDR3;
ps->irqs_disabled = irqs_disabled();
trace_preempt_off(CALLER_ADDR0, ip);
}
}
@ -3313,8 +3335,22 @@ void preempt_count_sub(int val)
return;
#endif

if (preempt_count() == val) {
struct preempt_store *ps = &per_cpu(the_ps,
raw_smp_processor_id());
u64 delta = sched_clock() - ps->ts;
/*
* Trace preempt disable stack if preemption
* is disabled for more than the threshold.
*/
if (delta > sysctl_preemptoff_tracing_threshold_ns)
trace_sched_preempt_disable(delta, ps->irqs_disabled,
ps->caddr[0], ps->caddr[1],
ps->caddr[2], ps->caddr[3]);
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);

View file

@ -291,6 +291,22 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER)
{
.procname = "preemptoff_tracing_threshold_ns",
.data = &sysctl_preemptoff_tracing_threshold_ns,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "irqsoff_tracing_threshold_ns",
.data = &sysctl_irqsoff_tracing_threshold_ns,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_SCHED_HMP
{
.procname = "sched_freq_reporting_policy",

View file

@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched/sysctl.h>

#include "trace.h"
@ -39,6 +40,12 @@ static int save_flags;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
/*
* irqsoff stack tracing threshold in ns.
* default: 1ms
*/
unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL;
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@ -454,17 +461,52 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
#else /* !CONFIG_PROVE_LOCKING */
#ifdef CONFIG_PREEMPTIRQ_EVENTS
struct irqsoff_store {
u64 ts;
unsigned long caddr[4];
};
static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */
/*
* We are only interested in hardirq on/off events:
*/
static inline void tracer_hardirqs_on(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
struct irqsoff_store *is = &per_cpu(the_irqsoff,
raw_smp_processor_id());
if (!is->ts) {
is->ts = sched_clock();
is->caddr[0] = CALLER_ADDR0;
is->caddr[1] = CALLER_ADDR1;
is->caddr[2] = CALLER_ADDR2;
is->caddr[3] = CALLER_ADDR3;
}
#endif /* CONFIG_PREEMPTIRQ_EVENTS */
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
struct irqsoff_store *is = &per_cpu(the_irqsoff,
raw_smp_processor_id());
u64 delta = 0;
if (is->ts) {
delta = sched_clock() - is->ts;
is->ts = 0;
}
if (delta > sysctl_irqsoff_tracing_threshold_ns)
trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
is->caddr[2], is->caddr[3]);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}