diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 55345faded87..9ef7169441bc 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -31,7 +31,8 @@ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o -arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o +arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_debug.o \ + perf_trace_counters.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o diff --git a/arch/arm64/kernel/perf_debug.c b/arch/arm64/kernel/perf_debug.c index bbcfb8c9b2f9..b0b4da762f95 100644 --- a/arch/arm64/kernel/perf_debug.c +++ b/arch/arm64/kernel/perf_debug.c @@ -23,6 +23,7 @@ */ static char *descriptions = " 0 arm64: perf: add debug patch logging framework\n" + " 1 Perf: arm64: Add L1 counters to tracepoints\n" ; static ssize_t desc_read(struct file *fp, char __user *buf, diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c new file mode 100644 index 000000000000..de39a1c99505 --- /dev/null +++ b/arch/arm64/kernel/perf_trace_counters.c @@ -0,0 +1,125 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#define CREATE_TRACE_POINTS
+#include "perf_trace_counters.h"
+
+static unsigned int tp_pid_state;
+
+DEFINE_PER_CPU(u32, previous_ccnt);
+DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+DEFINE_PER_CPU(u32, old_pid);
+
+void tracectr_notifier(void *ignore, struct task_struct *prev,
+ struct task_struct *next)
+{
+ int current_pid;
+ u32 cpu = next->on_cpu;
+
+ if (tp_pid_state != 1)
+ return;
+ current_pid = next->pid;
+ if (per_cpu(old_pid, cpu) != -1)
+ trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+ current_pid);
+ per_cpu(old_pid, cpu) = current_pid;
+}
+
+static void enable_tp_pid(void)
+{
+ if (tp_pid_state == 0) {
+ tp_pid_state = 1;
+ register_trace_sched_switch(tracectr_notifier, NULL);
+ }
+}
+
+static void disable_tp_pid(void)
+{
+ if (tp_pid_state == 1) {
+ tp_pid_state = 0;
+ unregister_trace_sched_switch(tracectr_notifier, NULL);
+ }
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[2];
+ buf[1] = '\n';
+ if (tp_pid_state == 0)
+ buf[0] = '0';
+ else
+ buf[0] = '1';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ switch (buf[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ enable_tp_pid();
+ break;
+ case 'n':
+ case 'N':
+ case '0':
+ disable_tp_pid();
+ break;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_perftp = {
+ .read = read_enabled_perftp_file_bool,
+ .write = write_enabled_perftp_file_bool,
+ .llseek = default_llseek,
+};
+
+int __init init_tracecounters(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+ unsigned int value = 1;
+ int cpu;
+
+ dir = 
debugfs_create_dir("perf_debug_tp", NULL); + if (!dir) + return -ENOMEM; + file = debugfs_create_file("enabled", 0660, dir, + &value, &fops_perftp); + if (!file) { + debugfs_remove(dir); + return -ENOMEM; + } + for_each_possible_cpu(cpu) + per_cpu(old_pid, cpu) = -1; + return 0; +} + +int __exit exit_tracecounters(void) +{ + return 0; +} +late_initcall(init_tracecounters); diff --git a/arch/arm64/kernel/perf_trace_counters.h b/arch/arm64/kernel/perf_trace_counters.h new file mode 100644 index 000000000000..b0cc9b8c6d13 --- /dev/null +++ b/arch/arm64/kernel/perf_trace_counters.h @@ -0,0 +1,116 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_COUNTERS_H_
+
+/* Ctr index for PMCNTENSET/CLR */
+#define CC 0x80000000
+#define C0 0x1
+#define C1 0x2
+#define C2 0x4
+#define C3 0x8
+#define C_ALL (CC | C0 | C1 | C2 | C3)
+#define NUM_L1_CTRS 4
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/tracepoint.h>
+
+DECLARE_PER_CPU(u32, previous_ccnt);
+DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+TRACE_EVENT(sched_switch_with_ctrs,
+
+ TP_PROTO(pid_t prev, pid_t next),
+
+ TP_ARGS(prev, next),
+
+ TP_STRUCT__entry(
+ __field(pid_t, old_pid)
+ __field(pid_t, new_pid)
+ __field(u32, cctr)
+ __field(u32, ctr0)
+ __field(u32, ctr1)
+ __field(u32, ctr2)
+ __field(u32, ctr3)
+ __field(u32, lctr0)
+ __field(u32, lctr1)
+ ),
+
+ TP_fast_assign(
+ u32 cpu = smp_processor_id();
+ u32 i;
+ u32 cnten_val;
+ u32 total_ccnt = 0;
+ u32 total_cnt = 0;
+ u32 delta_l1_cnts[NUM_L1_CTRS];
+ __entry->old_pid = prev;
+ __entry->new_pid = next;
+
+
+ /* Read PMCNTENSET */
+ asm volatile("mrs %0, pmcntenset_el0"
+ : "=r" (cnten_val));
+ /* Disable all the counters that were enabled */
+ asm volatile("msr pmcntenclr_el0, %0"
+ : : "r" (cnten_val));
+ if (cnten_val & CC) {
+ asm volatile("mrs %0, pmccntr_el0"
+ : "=r" (total_ccnt));
+ /* Read value */
+ __entry->cctr = total_ccnt -
+ per_cpu(previous_ccnt, cpu);
+ per_cpu(previous_ccnt, cpu) = total_ccnt;
+ }
+ for (i = 0; i < NUM_L1_CTRS; i++) {
+ if (cnten_val & (1 << i)) {
+ /* Select */
+ asm volatile("msr pmselr_el0, %0"
+ : : "r" (i));
+ isb();
+ asm volatile("mrs %0, pmxevcntr_el0"
+ : "=r" (total_cnt));
+ /* Read value */
+ delta_l1_cnts[i] = total_cnt -
+ per_cpu(previous_l1_cnts[i], cpu);
+ per_cpu(previous_l1_cnts[i], cpu) =
+ total_cnt;
+ } else
+ delta_l1_cnts[i] = 0;
+ }
+ /* Enable all the counters that were disabled */
+ asm volatile("msr pmcntenset_el0, %0"
+ : : "r" (cnten_val));
+
+ __entry->ctr0 = delta_l1_cnts[0];
+ 
__entry->ctr1 = delta_l1_cnts[1];
+ __entry->ctr2 = delta_l1_cnts[2];
+ __entry->ctr3 = delta_l1_cnts[3];
+ __entry->lctr0 = 0;
+ __entry->lctr1 = 0;
+ ),
+
+ TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, L2CTR0: %u, L2CTR1: %u",
+ __entry->old_pid, __entry->new_pid,
+ __entry->cctr, __entry->ctr0, __entry->ctr1,
+ __entry->ctr2, __entry->ctr3,
+ __entry->lctr0, __entry->lctr1)
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_counters
+#include <trace/define_trace.h>