Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

Alex Shi 2016-06-21 11:22:43 +08:00
commit 9b0440e3b2
46 changed files with 4150 additions and 63 deletions

View file

@ -1007,6 +1007,10 @@ F: drivers/hwtracing/coresight/*
F: Documentation/trace/coresight.txt
F: Documentation/devicetree/bindings/arm/coresight.txt
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
F: tools/perf/arch/arm/util/pmu.c
F: tools/perf/arch/arm/util/auxtrace.c
F: tools/perf/arch/arm/util/cs-etm.c
F: tools/perf/arch/arm/util/cs-etm.h
ARM/CORGI MACHINE SUPPORT
M: Richard Purdie <rpurdie@rpsys.net>

View file

@ -143,5 +143,310 @@
<&A53_3>;
};
etr@20070000 {
compatible = "arm,coresight-tmc", "arm,primecell";
reg = <0 0x20070000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
etr_in_port: endpoint {
slave-mode;
remote-endpoint = <&replicator_out_port1>;
};
};
};
tpiu@20030000 {
compatible = "arm,coresight-tpiu", "arm,primecell";
reg = <0 0x20030000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
tpiu_in_port: endpoint {
slave-mode;
remote-endpoint = <&replicator_out_port0>;
};
};
};
replicator@20020000 {
/* non-configurable replicators don't show up on the
* AMBA bus. As such no need to add "arm,primecell".
*/
compatible = "arm,coresight-replicator";
ports {
#address-cells = <1>;
#size-cells = <0>;
/* replicator output ports */
port@0 {
reg = <0>;
replicator_out_port0: endpoint {
remote-endpoint = <&tpiu_in_port>;
};
};
port@1 {
reg = <1>;
replicator_out_port1: endpoint {
remote-endpoint = <&etr_in_port>;
};
};
/* replicator input port */
port@2 {
reg = <0>;
replicator_in_port0: endpoint {
slave-mode;
remote-endpoint = <&etf_out_port>;
};
};
};
};
etf@20010000 {
compatible = "arm,coresight-tmc", "arm,primecell";
reg = <0 0x20010000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
ports {
#address-cells = <1>;
#size-cells = <0>;
/* input port */
port@0 {
reg = <0>;
etf_in_port: endpoint {
slave-mode;
remote-endpoint =
<&main_funnel_out_port>;
};
};
/* output port */
port@1 {
reg = <0>;
etf_out_port: endpoint {
remote-endpoint =
<&replicator_in_port0>;
};
};
};
};
main_funnel@20040000 {
compatible = "arm,coresight-funnel", "arm,primecell";
reg = <0 0x20040000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
main_funnel_out_port: endpoint {
remote-endpoint =
<&etf_in_port>;
};
};
port@1 {
reg = <0>;
main_funnel_in_port0: endpoint {
slave-mode;
remote-endpoint =
<&A72_57_funnel_out_port>;
};
};
port@2 {
reg = <1>;
main_funnel_in_port1: endpoint {
slave-mode;
remote-endpoint = <&A53_funnel_out_port>;
};
};
};
};
A72_57_funnel@220c0000 {
compatible = "arm,coresight-funnel", "arm,primecell";
reg = <0 0x220c0000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
A72_57_funnel_out_port: endpoint {
remote-endpoint =
<&main_funnel_in_port0>;
};
};
port@1 {
reg = <0>;
A72_57_funnel_in_port0: endpoint {
slave-mode;
remote-endpoint =
<&A72_57_etm0_out_port>;
};
};
port@2 {
reg = <1>;
A72_57_funnel_in_port1: endpoint {
slave-mode;
remote-endpoint =
<&A72_57_etm1_out_port>;
};
};
};
};
A53_funnel@230c0000 {
compatible = "arm,coresight-funnel", "arm,primecell";
reg = <0 0x230c0000 0 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
A53_funnel_out_port: endpoint {
remote-endpoint =
<&main_funnel_in_port1>;
};
};
port@1 {
reg = <0>;
A53_funnel_in_port0: endpoint {
slave-mode;
remote-endpoint = <&A53_etm0_out_port>;
};
};
port@2 {
reg = <1>;
A53_funnel_in_port1: endpoint {
slave-mode;
remote-endpoint = <&A53_etm1_out_port>;
};
};
port@3 {
reg = <2>;
A53_funnel_in_port2: endpoint {
slave-mode;
remote-endpoint = <&A53_etm2_out_port>;
};
};
port@4 {
reg = <3>;
A53_funnel_in_port3: endpoint {
slave-mode;
remote-endpoint = <&A53_etm3_out_port>;
};
};
};
};
etm@22040000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x22040000 0 0x1000>;
cpu = <&A57_0>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A72_57_etm0_out_port: endpoint {
remote-endpoint = <&A72_57_funnel_in_port0>;
};
};
};
etm@22140000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x22140000 0 0x1000>;
cpu = <&A57_1>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A72_57_etm1_out_port: endpoint {
remote-endpoint = <&A72_57_funnel_in_port1>;
};
};
};
etm@23040000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x23040000 0 0x1000>;
cpu = <&A53_0>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A53_etm0_out_port: endpoint {
remote-endpoint = <&A53_funnel_in_port0>;
};
};
};
etm@23140000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x23140000 0 0x1000>;
cpu = <&A53_1>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A53_etm1_out_port: endpoint {
remote-endpoint = <&A53_funnel_in_port1>;
};
};
};
etm@23240000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x23240000 0 0x1000>;
cpu = <&A53_2>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A53_etm2_out_port: endpoint {
remote-endpoint = <&A53_funnel_in_port2>;
};
};
};
etm@23340000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0x23340000 0 0x1000>;
cpu = <&A53_3>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
port {
A53_etm3_out_port: endpoint {
remote-endpoint = <&A53_funnel_in_port3>;
};
};
};
#include "juno-base.dtsi"
};
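
For orientation, the sink topology these bindings describe (read off the
remote-endpoint links above; the labels are the node names used in this file) is:

    A72/A57 ETMs -> A72_57_funnel --\
                                     +--> main_funnel -> ETF -> replicator -> { TPIU, ETR }
    A53 ETMs     -> A53_funnel -----/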

View file

@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/parser.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/types.h>
@ -46,6 +47,17 @@ struct etm_event_data {
struct list_head **path;
};
/**
* struct perf_pmu_drv_config - Driver specific configuration needed
* before a session can start.
* @sink: The name of the sink this session should use.
* @entry: Hook to the event->drv_configs list.
*/
struct perf_pmu_drv_config {
char *sink;
struct list_head entry;
};
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
@ -155,15 +167,28 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
static void *etm_setup_aux(int event_cpu, void **pages,
static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
int cpu;
char *sink_def = NULL;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
struct perf_pmu_drv_config *drv_config;
event_data = alloc_event_data(event_cpu);
/*
* Search the driver configurables looking for a sink. If more than
* one sink was specified the last one is taken.
*/
list_for_each_entry(drv_config, &event->drv_configs, entry) {
if (drv_config && drv_config->sink) {
sink_def = drv_config->sink;
break;
}
}
event_data = alloc_event_data(event->cpu);
if (!event_data)
return NULL;
@ -184,7 +209,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
event_data->path[cpu] = coresight_build_path(csdev);
event_data->path[cpu] = coresight_build_path(csdev, sink_def);
if (!event_data->path[cpu])
goto err;
}
@ -342,6 +367,95 @@ static void etm_event_del(struct perf_event *event, int mode)
etm_event_stop(event, PERF_EF_UPDATE);
}
enum {
ETM_TOKEN_SINK_CPU,
ETM_TOKEN_SINK,
ETM_TOKEN_ERR,
};
static const match_table_t drv_cfg_tokens = {
{ETM_TOKEN_SINK_CPU, "sink=cpu%d:%s"},
{ETM_TOKEN_SINK, "sink=%s"},
{ETM_TOKEN_ERR, NULL},
};
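
For illustration only, a minimal user-space sketch of feeding one of these strings to the kernel through the PERF_EVENT_IOC_SET_DRV_CONFIGS ioctl added later in this series; the file descriptor is assumed to come from perf_event_open() on a cs_etm event, and the sink name is just an example:

#include <sys/ioctl.h>
#include <linux/perf_event.h>	/* PERF_EVENT_IOC_SET_DRV_CONFIGS, once this series is applied */

/* Hand a driver configuration string, e.g. "sink=20070000.etr" or
 * "sink=cpu0:20070000.etr", to an already opened cs_etm perf event.
 */
static int etm_set_sink(int event_fd, const char *drv_cfg)
{
	return ioctl(event_fd, PERF_EVENT_IOC_SET_DRV_CONFIGS, drv_cfg);
}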
static int etm_get_drv_configs(struct perf_event *event, void __user *arg)
{
char *config, *sink = NULL;
int cpu = -1, token, ret = 0;
substring_t args[MAX_OPT_ARGS];
struct perf_pmu_drv_config *drv_config = NULL;
/* Make user supplied input usable */
config = strndup_user(arg, PAGE_SIZE);
if (IS_ERR(config))
return PTR_ERR(config);
/* See above declared @drv_cfg_tokens for the usable formats */
token = match_token(config, drv_cfg_tokens, args);
switch (token) {
case ETM_TOKEN_SINK:
/* Just a sink has been specified */
sink = match_strdup(&args[0]);
if (IS_ERR(sink)) {
ret = PTR_ERR(sink);
goto err;
}
break;
case ETM_TOKEN_SINK_CPU:
/* We have a sink and a CPU */
if (match_int(&args[0], &cpu)) {
ret = -EINVAL;
goto err;
}
sink = match_strdup(&args[1]);
if (IS_ERR(sink)) {
ret = PTR_ERR(sink);
goto err;
}
break;
default:
ret = -EINVAL;
goto err;
}
/* If the CPUs don't match, the sink is destined for another path */
if (event->cpu != cpu)
goto err;
/*
* We have a valid configuration, allocate memory and add to the list
* of driver configurables.
*/
drv_config = kzalloc(sizeof(*drv_config), GFP_KERNEL);
if (!drv_config) {
ret = -ENOMEM;
goto err;
}
drv_config->sink = sink;
list_add(&drv_config->entry, &event->drv_configs);
out:
kfree(config);
return ret;
err:
kfree(sink);
goto out;
}
static void etm_free_drv_configs(struct perf_event *event)
{
struct perf_pmu_drv_config *config, *itr;
list_for_each_entry_safe(config, itr, &event->drv_configs, entry) {
list_del(&config->entry);
kfree(config->sink);
kfree(config);
}
}
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
char entry[sizeof("cpu9999999")];
@ -383,6 +497,9 @@ static int __init etm_perf_init(void)
etm_pmu.stop = etm_event_stop;
etm_pmu.add = etm_event_add;
etm_pmu.del = etm_event_del;
etm_pmu.get_drv_configs = etm_get_drv_configs;
etm_pmu.free_drv_configs = etm_free_drv_configs;
ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
if (ret == 0)

View file

@ -94,7 +94,8 @@ static inline void CS_UNLOCK(void __iomem *addr)
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct list_head *coresight_build_path(struct coresight_device *csdev);
struct list_head *coresight_build_path(struct coresight_device *csdev,
const char *sink);
void coresight_release_path(struct list_head *path);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X

View file

@ -15,11 +15,30 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
/**
* struct cs_etr_buffers - keep track of a recording session's specifics
* @tmc: generic portion of the TMC buffers
* @paddr: the physical address of a DMA'able contiguous memory area
* @vaddr: the virtual address associated to @paddr
* @size: how much memory we have, starting at @paddr
* @dev: the device @vaddr has been tied to
*/
struct cs_etr_buffers {
struct cs_buffers tmc;
dma_addr_t paddr;
void __iomem *vaddr;
u32 size;
struct device *dev;
};
void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl;
@ -235,9 +254,233 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
static void *tmc_alloc_etr_buffer(struct coresight_device *csdev, int cpu,
void **pages, int nr_pages, bool overwrite)
{
int node;
struct cs_etr_buffers *buf;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (cpu == -1)
cpu = smp_processor_id();
node = cpu_to_node(cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_etr_buffers), GFP_KERNEL, node);
if (!buf)
return NULL;
buf->dev = drvdata->dev;
buf->size = drvdata->size;
buf->vaddr = dma_alloc_coherent(buf->dev, buf->size,
&buf->paddr, GFP_KERNEL);
if (!buf->vaddr) {
kfree(buf);
return NULL;
}
buf->tmc.snapshot = overwrite;
buf->tmc.nr_pages = nr_pages;
buf->tmc.data_pages = pages;
return buf;
}
static void tmc_free_etr_buffer(void *config)
{
struct cs_etr_buffers *buf = config;
dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->paddr);
kfree(buf);
}
static int tmc_set_etr_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
int ret = 0;
unsigned long head;
struct cs_etr_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/* wrap head around to the amount of space we have */
head = handle->head & ((buf->tmc.nr_pages << PAGE_SHIFT) - 1);
/* find the page to write to */
buf->tmc.cur = head / PAGE_SIZE;
/* and offset within that page */
buf->tmc.offset = head % PAGE_SIZE;
local_set(&buf->tmc.data_size, 0);
/* Tell the HW where to put the trace data */
drvdata->vaddr = buf->vaddr;
drvdata->paddr = buf->paddr;
memset(drvdata->vaddr, 0, drvdata->size);
return ret;
}
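
To make the head-wrapping arithmetic above concrete, a worked example with hypothetical numbers (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12):

/*
 * nr_pages = 4           => AUX area size = 4 << PAGE_SHIFT = 0x4000 bytes
 * handle->head = 0x5340  => head = 0x5340 & 0x3fff = 0x1340
 *                        => cur = 0x1340 / 0x1000 = 1, offset = 0x340
 */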
static unsigned long tmc_reset_etr_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config, bool *lost)
{
long size = 0;
struct cs_etr_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (buf) {
/*
* In snapshot mode ->data_size holds the new address of the
* ring buffer's head. The size itself is the whole address
* range since we want the latest information.
*/
if (buf->tmc.snapshot) {
size = buf->tmc.nr_pages << PAGE_SHIFT;
handle->head = local_xchg(&buf->tmc.data_size, size);
}
/*
* Tell the tracer PMU how much we got in this run and if
* something went wrong along the way. Nobody else can use
* this cs_etr_buffers instance until we are done. As such
* resetting parameters here and squaring off with the ring
* buffer API in the tracer PMU is fine.
*/
*lost = !!local_xchg(&buf->tmc.lost, 0);
size = local_xchg(&buf->tmc.data_size, 0);
}
/* Get ready for another run */
drvdata->vaddr = NULL;
drvdata->paddr = 0;
return size;
}
static void tmc_update_etr_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
int i, cur;
u32 *buf_ptr;
u32 read_ptr, write_ptr;
u32 status, to_read;
unsigned long offset;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
return;
/* This shouldn't happen */
if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
return;
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
/*
* Get a hold of the status register and see if a wrap around
* has occurred. If so adjust things accordingly.
*/
status = readl_relaxed(drvdata->base + TMC_STS);
if (status & TMC_STS_FULL) {
local_inc(&buf->lost);
to_read = drvdata->size;
} else {
to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
}
/*
* The TMC RAM buffer may be bigger than the space available in the
* perf ring buffer (handle->size). If so advance the RRP so that we
* get the latest trace data.
*/
if (to_read > handle->size) {
u32 buffer_start, mask = 0;
/* Read buffer start address in system memory */
buffer_start = readl_relaxed(drvdata->base + TMC_DBALO);
/*
* The value written to RRP must be byte-address aligned to
* the width of the trace memory databus _and_ to a frame
* boundary (16 byte), whichever is the biggest. For example,
* for 32-bit, 64-bit and 128-bit wide trace memory, the four
* LSBs must be 0s. For 256-bit wide trace memory, the five
* LSBs must be 0s.
*/
switch (drvdata->memwidth) {
case TMC_MEM_INTF_WIDTH_32BITS:
case TMC_MEM_INTF_WIDTH_64BITS:
case TMC_MEM_INTF_WIDTH_128BITS:
mask = GENMASK(31, 5);
break;
case TMC_MEM_INTF_WIDTH_256BITS:
mask = GENMASK(31, 6);
break;
}
/*
* Make sure the new size is aligned in accordance with the
* requirement explained above.
*/
to_read = handle->size & mask;
/* Move the RAM read pointer up */
read_ptr = (write_ptr + drvdata->size) - to_read;
/* Make sure we are still within our limits */
if (read_ptr > (buffer_start + (drvdata->size - 1)))
read_ptr -= drvdata->size;
/* Tell the HW */
writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
local_inc(&buf->lost);
}
cur = buf->cur;
offset = buf->offset;
/* for every byte to read */
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
offset += 4;
if (offset >= PAGE_SIZE) {
offset = 0;
cur++;
/* wrap around at the end of the buffer */
cur &= buf->nr_pages - 1;
}
}
/*
* In snapshot mode all we have to do is communicate to
* perf_aux_output_end() the address of the current head. In full
* trace mode the same function expects a size to move rb->aux_head
* forward.
*/
if (buf->snapshot)
local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
else
local_add(to_read, &buf->data_size);
CS_LOCK(drvdata->base);
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
.alloc_buffer = tmc_alloc_etr_buffer,
.free_buffer = tmc_free_etr_buffer,
.set_buffer = tmc_set_etr_buffer,
.reset_buffer = tmc_reset_etr_buffer,
.update_buffer = tmc_update_etr_buffer,
};
const struct coresight_ops tmc_etr_cs_ops = {
@ -300,13 +543,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. As such zero-out the buffer so that we don't end
* up with stale data.
*
* Since the tracer is still enabled drvdata::buf
* can't be NULL.
* buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
* so we don't have to explicitly clear it. Also, since the
* tracer is still enabled drvdata::buf can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
tmc_etr_enable_hw(drvdata);
} else {
/*
@ -315,7 +555,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
*/
vaddr = drvdata->vaddr;
paddr = drvdata->paddr;
drvdata->buf = NULL;
drvdata->buf = drvdata->vaddr = NULL;
}
drvdata->reading = false;

View file

@ -371,31 +371,42 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
* @sink: The name of the sink this path should connect with.
* @path: The list to add devices to.
*
* The tree of Coresight device is traversed until an activated sink is
* found. From there the sink is added to the list along with all the
* devices that led to that point - the end result is a list from source
* to sink. In that list the source is the first device and the sink the
* last one.
* The tree of Coresight devices is traversed until an activated sink or
* the one specified by @sink is found.
* From there the sink is added to the list along with all the devices that
* led to that point - the end result is a list from source to sink. In that
* list the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
struct list_head *path)
const char *sink, struct list_head *path)
{
int i;
bool found = false;
struct coresight_node *node;
struct coresight_connection *conn;
/* An activated sink has been found. Enqueue the element */
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
goto out;
/*
* First see if we are dealing with a sink. If we have one check if
* it was selected via sysFS or the perf cmd line.
*/
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
/* Activated via perf cmd line */
if (sink && !strcmp(dev_name(&csdev->dev), sink))
goto out;
/* Activated via sysFS */
if (csdev->activated)
goto out;
}
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
conn = &csdev->conns[i];
if (_coresight_build_path(conn->child_dev, path) == 0) {
struct coresight_device *child_dev = csdev->conns[i].child_dev;
if (child_dev &&
_coresight_build_path(child_dev, sink, path) == 0) {
found = true;
break;
}
@ -422,9 +433,11 @@ out:
return 0;
}
struct list_head *coresight_build_path(struct coresight_device *csdev)
struct list_head *coresight_build_path(struct coresight_device *csdev,
const char *sink)
{
struct list_head *path;
int rc;
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
@ -432,9 +445,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
INIT_LIST_HEAD(path);
if (_coresight_build_path(csdev, path)) {
rc = _coresight_build_path(csdev, sink, path);
if (rc) {
kfree(path);
path = NULL;
return ERR_PTR(rc);
}
return path;
@ -506,9 +520,10 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
path = coresight_build_path(csdev);
if (!path) {
path = coresight_build_path(csdev, NULL);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
ret = PTR_ERR(path);
goto out;
}

View file

@ -378,7 +378,7 @@ struct pmu {
/*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (int cpu, void **pages,
void *(*setup_aux) (struct perf_event *event, void **pages,
int nr_pages, bool overwrite);
/* optional */
@ -391,6 +391,14 @@ struct pmu {
* Filter events for PMU-specific reasons.
*/
int (*filter_match) (struct perf_event *event); /* optional */
/*
* Initial, PMU driver specific configuration.
*/
int (*get_drv_configs) (struct perf_event *event,
void __user *arg); /* optional */
void (*free_drv_configs) (struct perf_event *event);
/* optional */
};
/**
@ -558,6 +566,7 @@ struct perf_event {
struct irq_work pending;
atomic_t event_limit;
struct list_head drv_configs;
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;

View file

@ -395,6 +395,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_SET_DRV_CONFIGS _IOW('$', 10, char *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,

View file

@ -3725,6 +3725,9 @@ static void __free_event(struct perf_event *event)
if (event->destroy)
event->destroy(event);
if (event->pmu->free_drv_configs)
event->pmu->free_drv_configs(event);
if (event->ctx)
put_ctx(event->ctx);
@ -4277,6 +4280,8 @@ static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
static int perf_event_drv_configs(struct perf_event *event,
void __user *arg);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
@ -4333,6 +4338,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
case PERF_EVENT_IOC_SET_BPF:
return perf_event_set_bpf_prog(event, arg);
case PERF_EVENT_IOC_SET_DRV_CONFIGS:
return perf_event_drv_configs(event, (void __user *)arg);
default:
return -ENOTTY;
}
@ -4365,6 +4373,7 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
switch (_IOC_NR(cmd)) {
case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
case _IOC_NR(PERF_EVENT_IOC_ID):
case _IOC_NR(PERF_EVENT_IOC_SET_DRV_CONFIGS):
/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
cmd &= ~IOCSIZE_MASK;
@ -7264,6 +7273,15 @@ void perf_bp_event(struct perf_event *bp, void *data)
}
#endif
static int perf_event_drv_configs(struct perf_event *event,
void __user *arg)
{
if (!event->pmu->get_drv_configs)
return -EINVAL;
return event->pmu->get_drv_configs(event, arg);
}
/*
* hrtimer based swevent callback
*/
@ -8000,6 +8018,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
INIT_LIST_HEAD(&event->active_entry);
INIT_LIST_HEAD(&event->drv_configs);
INIT_HLIST_NODE(&event->hlist_entry);

View file

@ -562,7 +562,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
goto out;
}
rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
overwrite);
if (!rb->aux_priv)
goto out;

View file

@ -57,6 +57,7 @@ include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/fls64.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/linux/coresight-pmu.h
include/linux/perf_event.h
include/linux/list.h
include/linux/hash.h

View file

@ -77,6 +77,9 @@ include config/utilities.mak
# Define NO_AUXTRACE if you do not want AUX area tracing support
#
# Define NO_LIBBPF if you do not want BPF support
#
# Define NO_CSTRACE if you do not want CoreSight trace decoding support
#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL

View file

@ -2,3 +2,5 @@ libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o

View file

@ -0,0 +1,54 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdbool.h>
#include <linux/coresight-pmu.h>
#include "../../util/auxtrace.h"
#include "../../util/evlist.h"
#include "../../util/pmu.h"
#include "cs-etm.h"
struct auxtrace_record
*auxtrace_record__init(struct perf_evlist *evlist, int *err)
{
struct perf_pmu *cs_etm_pmu;
struct perf_evsel *evsel;
bool found_etm = false;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
if (evlist) {
evlist__for_each(evlist, evsel) {
if (cs_etm_pmu &&
evsel->attr.type == cs_etm_pmu->type)
found_etm = true;
}
}
if (found_etm)
return cs_etm_record_init(err);
/*
* Clear 'err' even if we haven't found a cs_etm event - that way perf
* record can still be used even if tracers aren't present. The NULL
* return value will take care of telling the infrastructure HW tracing
* isn't available.
*/
*err = 0;
return NULL;
}

View file

@ -0,0 +1,563 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <api/fs/fs.h>
#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
#include <stdlib.h>
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct perf_evlist *evlist;
bool snapshot_mode;
size_t snapshot_size;
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
unsigned long long snapshot_size = 0;
char *endptr;
if (str) {
snapshot_size = strtoull(str, &endptr, 0);
if (*endptr || snapshot_size > SIZE_MAX)
return -1;
}
opts->auxtrace_snapshot_mode = true;
opts->auxtrace_snapshot_size = snapshot_size;
ptr->snapshot_size = snapshot_size;
return 0;
}
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct perf_evsel *evsel, *cs_etm_evsel = NULL;
const struct cpu_map *cpus = evlist->cpus;
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
ptr->evlist = evlist;
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
evlist__for_each(evlist, evsel) {
if (evsel->attr.type == cs_etm_pmu->type) {
if (cs_etm_evsel) {
pr_err("There may be only one %s event\n",
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
evsel->attr.freq = 0;
evsel->attr.sample_period = 1;
cs_etm_evsel = evsel;
opts->full_auxtrace = true;
}
}
/* no need to continue if at least one event of interest was found */
if (!cs_etm_evsel)
return 0;
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
/* we are in snapshot mode */
if (opts->auxtrace_snapshot_mode) {
/*
* No size was given to '-S' or '-m,', so go with
* the default
*/
if (!opts->auxtrace_snapshot_size &&
!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages =
KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
} else if (!opts->auxtrace_mmap_pages && !privileged &&
opts->mmap_pages == UINT_MAX) {
opts->mmap_pages = KiB(256) / page_size;
}
/*
* '-m,xyz' was specified but no snapshot size, so make the
* snapshot size as big as the auxtrace mmap area.
*/
if (!opts->auxtrace_snapshot_size) {
opts->auxtrace_snapshot_size =
opts->auxtrace_mmap_pages * (size_t)page_size;
}
/*
* -Sxyz was specified but no auxtrace mmap area, so make the
* auxtrace mmap area big enough to fit the requested snapshot
* size.
*/
if (!opts->auxtrace_mmap_pages) {
size_t sz = opts->auxtrace_snapshot_size;
sz = round_up(sz, page_size) / page_size;
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
/* Snapshot size can't be bigger than the auxtrace area */
if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
opts->auxtrace_snapshot_size,
opts->auxtrace_mmap_pages * (size_t)page_size);
return -EINVAL;
}
/* Something went wrong somewhere - this shouldn't happen */
if (!opts->auxtrace_snapshot_size ||
!opts->auxtrace_mmap_pages) {
pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
return -EINVAL;
}
}
/* We are in full trace mode but '-m,xyz' wasn't specified */
if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
opts->auxtrace_mmap_pages = KiB(128) / page_size;
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
}
/* Validate auxtrace_mmap_pages provided by user */
if (opts->auxtrace_mmap_pages) {
unsigned int max_page = (KiB(128) / page_size);
size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
if (!privileged &&
opts->auxtrace_mmap_pages > max_page) {
opts->auxtrace_mmap_pages = max_page;
pr_err("auxtrace too big, truncating to %d\n",
max_page);
}
if (!is_power_of_2(sz)) {
pr_err("Invalid mmap size for %s: must be a power of 2\n",
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
}
if (opts->auxtrace_snapshot_mode)
pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
opts->auxtrace_snapshot_size);
if (cs_etm_evsel) {
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
perf_evlist__to_front(evlist, cs_etm_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!cpu_map__empty(cpus))
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
}
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
struct perf_evsel *tracking_evsel;
int err;
err = parse_events(evlist, "dummy:u", NULL);
if (err)
return err;
tracking_evsel = perf_evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->attr.freq = 0;
tracking_evsel->attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
if (!cpu_map__empty(cpus))
perf_evsel__set_sample_bit(tracking_evsel, TIME);
}
return 0;
}
static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
u64 config = 0;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct perf_evlist *evlist = ptr->evlist;
struct perf_evsel *evsel;
evlist__for_each(evlist, evsel) {
if (evsel->attr.type == cs_etm_pmu->type) {
/*
* Variable perf_event_attr::config is assigned to
* ETMv3/PTM. The bit fields have been made to match
* the ETMv3.5 ETMCR register specification. See the
* PMU_FORMAT_ATTR() declarations in
* drivers/hwtracing/coresight/coresight-perf.c for
* details.
*/
config = evsel->attr.config;
break;
}
}
return config;
}
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
{
int i;
int etmv3 = 0, etmv4 = 0;
const struct cpu_map *cpus = evlist->cpus;
/* cpu map is not empty, we have specific CPUs to work with */
if (!cpu_map__empty(cpus)) {
for (i = 0; i < cpu_map__nr(cpus); i++) {
if (cs_etm_is_etmv4(itr, cpus->map[i]))
etmv4++;
else
etmv3++;
}
} else {
/* get configuration for all CPUs in the system */
for (i = 0; i < cpu__max_cpu(); i++) {
if (cs_etm_is_etmv4(itr, i))
etmv4++;
else
etmv3++;
}
}
return (CS_ETM_HEADER_SIZE +
(etmv4 * CS_ETMV4_PRIV_SIZE) +
(etmv3 * CS_ETMV3_PRIV_SIZE));
}
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
[CS_ETM_ETMCCER] = "mgmt/etmccer",
[CS_ETM_ETMIDR] = "mgmt/etmidr",
};
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
bool ret = false;
char path[PATH_MAX];
int scan;
unsigned int val;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* Take any of the RO files for ETMv4 and see if it is present */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* The file was read successfully, we have a winner */
if (scan == 1)
ret = true;
return ret;
}
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
char pmu_path[PATH_MAX];
int scan;
unsigned int val = 0;
/* Get RO metadata from sysfs */
snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
if (scan != 1)
pr_err("%s: error reading: %s\n", __func__, pmu_path);
return val;
}
static void cs_etm_get_metadata(int cpu, u32 *offset,
struct auxtrace_record *itr,
struct auxtrace_info_event *info)
{
u32 increment;
u64 magic;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* first see what kind of tracer this cpu is affined to */
if (cs_etm_is_etmv4(itr, cpu)) {
magic = __perf_cs_etmv4_magic;
/* Get trace configuration register */
info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
cs_etm_get_config(itr);
/* Get traceID from the framework */
info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
coresight_get_trace_id(cpu);
/* Get read-only information from sysFS */
info->priv[*offset + CS_ETMV4_TRCIDR0] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
info->priv[*offset + CS_ETMV4_TRCIDR1] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
info->priv[*offset + CS_ETMV4_TRCIDR2] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
info->priv[*offset + CS_ETMV4_TRCIDR8] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv4_ro
[CS_ETMV4_TRCAUTHSTATUS]);
/* How much space was used */
increment = CS_ETMV4_PRIV_MAX;
} else {
magic = __perf_cs_etmv3_magic;
/* Get configuration register */
info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
/* Get traceID from the framework */
info->priv[*offset + CS_ETM_ETMTRACEIDR] =
coresight_get_trace_id(cpu);
/* Get read-only information from sysFS */
info->priv[*offset + CS_ETM_ETMCCER] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv3_ro[CS_ETM_ETMCCER]);
info->priv[*offset + CS_ETM_ETMIDR] =
cs_etm_get_ro(cs_etm_pmu, cpu,
metadata_etmv3_ro[CS_ETM_ETMIDR]);
/* How much space was used */
increment = CS_ETM_PRIV_MAX;
}
/* Build generic header portion */
info->priv[*offset + CS_ETM_MAGIC] = magic;
info->priv[*offset + CS_ETM_CPU] = cpu;
/* Where the next CPU entry should start from */
*offset += increment;
}
static int cs_etm_info_fill(struct auxtrace_record *itr,
struct perf_session *session,
struct auxtrace_info_event *info,
size_t priv_size)
{
int i;
u32 offset;
u64 nr_cpu, type;
const struct cpu_map *cpus = session->evlist->cpus;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
return -EINVAL;
if (!session->evlist->nr_mmaps)
return -EINVAL;
/* If the cpu_map is empty all CPUs are involved */
nr_cpu = cpu_map__empty(cpus) ? cpu__max_cpu() : cpu_map__nr(cpus);
/* Get PMU type as dynamically assigned by the core */
type = cs_etm_pmu->type;
/* First fill out the session header */
info->type = PERF_AUXTRACE_CS_ETM;
info->priv[CS_HEADER_VERSION_0] = 0;
info->priv[CS_PMU_TYPE_CPUS] = type << 32;
info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
offset = CS_ETM_SNAPSHOT + 1;
/* cpu map is not empty, we have specific CPUs to work with */
if (!cpu_map__empty(cpus)) {
for (i = 0; i < cpu_map__nr(cpus) && offset < priv_size; i++)
cs_etm_get_metadata(cpus->map[i], &offset, itr, info);
} else {
/* get configuration for all CPUs in the system */
for (i = 0; i < cpu__max_cpu(); i++)
cs_etm_get_metadata(i, &offset, itr, info);
}
return 0;
}
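
Putting the two routines above together, the layout of the info->priv[] area handed to the perf session is as follows (illustrative; the CS_* indices come from the tool's cs-etm.h):

/*
 * priv[CS_HEADER_VERSION_0] = 0
 * priv[CS_PMU_TYPE_CPUS]    = (PMU type << 32) | number of CPUs
 * priv[CS_ETM_SNAPSHOT]     = snapshot mode flag
 * ...then one block per traced CPU, starting with CS_ETM_MAGIC and
 * CS_ETM_CPU, followed by the ETMv3 values (ETMCR, trace ID, ETMCCER,
 * ETMIDR) or the ETMv4 values (TRCCONFIGR, TRCTRACEIDR, TRCIDR0/1/2/8,
 * TRCAUTHSTATUS) gathered in cs_etm_get_metadata().
 */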
static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
int idx, struct auxtrace_mmap *mm,
unsigned char *data __maybe_unused,
u64 *head, u64 *old)
{
pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
__func__, idx, (size_t)*old, (size_t)*head, mm->len);
*old = *head;
*head += mm->len;
return 0;
}
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_evsel *evsel;
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->cs_etm_pmu->type)
return perf_evsel__disable(evsel);
}
return -EINVAL;
}
static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_evsel *evsel;
evlist__for_each(ptr->evlist, evsel) {
int nthreads = thread_map__nr(evsel->threads);
int ncpus = cpu_map__nr(evsel->cpus);
if (evsel->attr.type == ptr->cs_etm_pmu->type) {
return perf_evsel__enable(evsel, ncpus, nthreads);
}
}
return -EINVAL;
}
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}
static void cs_etm_recording_free(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
free(ptr);
}
static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_evsel *evsel;
evlist__for_each(ptr->evlist, evsel) {
if (evsel->attr.type == ptr->cs_etm_pmu->type)
return perf_evlist__enable_event_idx(ptr->evlist,
evsel, idx);
}
return -EINVAL;
}
struct auxtrace_record *cs_etm_record_init(int *err)
{
struct perf_pmu *cs_etm_pmu;
struct cs_etm_recording *ptr;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
if (!cs_etm_pmu) {
*err = -EINVAL;
goto out;
}
ptr = zalloc(sizeof(struct cs_etm_recording));
if (!ptr) {
*err = -ENOMEM;
goto out;
}
ptr->cs_etm_pmu = cs_etm_pmu;
ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
ptr->itr.info_fill = cs_etm_info_fill;
ptr->itr.find_snapshot = cs_etm_find_snapshot;
ptr->itr.snapshot_start = cs_etm_snapshot_start;
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
ptr->itr.free = cs_etm_recording_free;
ptr->itr.read_finish = cs_etm_read_finish;
*err = 0;
return &ptr->itr;
out:
return NULL;
}

View file

@ -0,0 +1,23 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef INCLUDE__PERF_CS_ETM_H__
#define INCLUDE__PERF_CS_ETM_H__
struct auxtrace_record *cs_etm_record_init(int *err);
#endif

View file

@ -0,0 +1,34 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <linux/coresight-pmu.h>
#include <linux/perf_event.h>
#include "../../util/pmu.h"
struct perf_event_attr
*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
#ifdef HAVE_AUXTRACE_SUPPORT
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
}
#endif
return NULL;
}

View file

@ -1,2 +1,6 @@
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o

View file

@ -276,6 +276,7 @@ static int record__open(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
struct perf_evsel_config_term *err_term;
int rc = 0;
perf_evlist__config(evlist, opts);
@ -305,6 +306,14 @@ try_again:
goto out;
}
if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
strerror_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {

View file

@ -92,7 +92,8 @@ static struct {
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_ADDR |
PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
PERF_OUTPUT_PERIOD,

View file

@ -433,6 +433,24 @@ endif
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
ifdef CSTRACE_PATH
ifeq (${IS_64_BIT}, 1)
CSTRACE_LNX = linux64
else
CSTRACE_LNX = linux
endif
ifeq (${DEBUG}, 1)
LIBCSTRACE = -lcstraced_c_api -lcstraced
CSTRACE_LIB_PATH = $(CSTRACE_PATH)/lib/$(CSTRACE_LNX)/dbg
else
LIBCSTRACE = -lcstraced_c_api -lcstraced
CSTRACE_LIB_PATH = $(CSTRACE_PATH)/lib/$(CSTRACE_LNX)/rel
endif
$(call detected,CSTRACE)
$(call detected_var,CSTRACE_PATH)
EXTLIBS += -L$(CSTRACE_LIB_PATH) $(LIBCSTRACE) -lstdc++
endif
ifdef NO_LIBPERL
CFLAGS += -DNO_LIBPERL
else
@ -647,9 +665,14 @@ ifdef LIBBABELTRACE
endif
ifndef NO_AUXTRACE
ifeq ($(feature-get_cpuid), 0)
msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
NO_AUXTRACE := 1
ifeq ($(ARCH),x86)
ifeq ($(feature-get_cpuid), 0)
msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
NO_AUXTRACE := 1
else
$(call detected,CONFIG_AUXTRACE)
CFLAGS += -DHAVE_AUXTRACE_SUPPORT
endif
else
$(call detected,CONFIG_AUXTRACE)
CFLAGS += -DHAVE_AUXTRACE_SUPPORT

View file

@ -0,0 +1,124 @@
# perf script event handlers, generated by perf script -g python
# Licensed under the terms of the GNU GPL License version 2
# The common_* event handler fields are the most useful fields common to
# all events. They don't necessarily correspond to the 'common_*' fields
# in the format files. Those fields not available as handler params can
# be retrieved using Python functions of the form common_*(context).
# See the perf-trace-python Documentation for the list of available functions.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from subprocess import *
from Core import *
import re;
from optparse import OptionParser
#
# Add options to specify vmlinux file and the objdump executable
#
parser = OptionParser()
parser.add_option("-k", "--vmlinux", dest="vmlinux_name",
help="path to vmlinux file")
parser.add_option("-d", "--objdump", dest="objdump_name",
help="name of objdump executable (in path)")
(options, args) = parser.parse_args()
if (options.objdump_name == None):
sys.exit("No objdump executable specified - use -d or --objdump option")
# initialize global dicts and regular expression
build_ids = dict();
mmaps = dict();
disasm_cache = dict();
disasm_re = re.compile("^\s*([0-9a-fA-F]+):")
cache_size = 16*1024
def trace_begin():
cmd_output = check_output(["perf", "buildid-list"]).split('\n');
bid_re = re.compile("([a-fA-F0-9]+)[ \t]([^ \n]+)")
for line in cmd_output:
m = bid_re.search(line)
if (m != None) :
build_ids[m.group(2)] = \
os.environ['PERF_BUILDID_DIR'] + \
m.group(2) + "/" + m.group(1);
if ((options.vmlinux_name != None) and ("[kernel.kallsyms]" in build_ids)):
build_ids['[kernel.kallsyms]'] = options.vmlinux_name;
elif "[kernel.kallsyms]" in build_ids:
del build_ids['[kernel.kallsyms]']
mmap_re = re.compile("PERF_RECORD_MMAP2 -?[0-9]+/[0-9]+: \[(0x[0-9a-fA-F]+).*:\s.*\s(.*.so)")
cmd_output= check_output("perf script --show-mmap-events | fgrep PERF_RECORD_MMAP2",shell=True).split('\n')
for line in cmd_output:
m = mmap_re.search(line)
if (m != None) :
mmaps[m.group(2)] = int(m.group(1),0)
def trace_end():
pass
def process_event(t):
global cache_size
global options
sample = t['sample']
dso = t['dso']
# don't let the cache get too big, but don't bother with a fancy replacement policy
# just clear it when it hits max size
if (len(disasm_cache) > cache_size):
disasm_cache.clear();
cpu = format(sample['cpu'], "d");
addr_range = format(sample['ip'],"x") + ":" + format(sample['addr'],"x");
try:
disasm_output = disasm_cache[addr_range];
except:
try:
fname = build_ids[dso];
except KeyError:
if (dso == '[kernel.kallsyms]'):
return;
fname = dso;
if (dso in mmaps):
offset = mmaps[dso];
disasm = [options.objdump_name,"-d","-z", "--adjust-vma="+format(offset,"#x"),"--start-address="+format(sample['ip'],"#x"),"--stop-address="+format(sample['addr'],"#x"), fname]
else:
offset = 0
disasm = [options.objdump_name,"-d","-z", "--start-address="+format(sample['ip'],"#x"),"--stop-address="+format(sample['addr'],"#x"),fname]
disasm_output = check_output(disasm).split('\n')
disasm_cache[addr_range] = disasm_output;
print "FILE: %s\tCPU: %s" % (dso, cpu);
for line in disasm_output:
m = disasm_re.search(line)
if (m != None) :
try:
print "\t",line
except:
exit(1);
else:
continue;
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "print_header"
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),

View file

@ -0,0 +1,44 @@
#
# Copyright(C) 2016 Linaro Limited. All rights reserved.
# Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
def trace_begin():
pass;
def trace_end():
pass
def process_event(t):
sample = t['sample']
print "range:",format(sample['ip'],"x"),"-",format(sample['addr'],"x")
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "print_header"
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),

View file

@ -84,6 +84,8 @@ libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
libperf-$(CONFIG_AUXTRACE) += intel-pt.o
libperf-$(CONFIG_AUXTRACE) += intel-bts.o
libperf-$(CONFIG_AUXTRACE) += cs-etm.o
libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
libperf-y += parse-branch-options.o
libperf-y += parse-regs-options.o

View file

@ -49,6 +49,7 @@
#include "intel-pt.h"
#include "intel-bts.h"
#include "cs-etm.h"
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
struct auxtrace_mmap_params *mp,
@ -892,6 +893,8 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
return intel_pt_process_auxtrace_info(event, session);
case PERF_AUXTRACE_INTEL_BTS:
return intel_bts_process_auxtrace_info(event, session);
case PERF_AUXTRACE_CS_ETM:
return cs_etm__process_auxtrace_info(event, session);
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;

View file

@ -41,6 +41,7 @@ enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
PERF_AUXTRACE_INTEL_BTS,
PERF_AUXTRACE_CS_ETM,
};
enum itrace_period_type {

View file

@ -145,7 +145,7 @@ static int asnprintf(char **strp, size_t size, const char *fmt, ...)
return ret;
}
static char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
{
char *tmp = bf;
int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir,

View file

@ -11,6 +11,7 @@
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
char *build_id__filename(const char *sbuild_id, char *bf, size_t size);
int build_id__sprintf(const u8 *build_id, int len, char *bf);
int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
int filename__sprintf_build_id(const char *pathname, char *sbuild_id);

View file

@ -0,0 +1,7 @@
ifeq ($(CSTRACE_PATH),)
libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder-stub.o
else
CFLAGS_cs-etm-decoder.o += -I$(CSTRACE_PATH)/include
libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
endif

View file

@ -0,0 +1,91 @@
/*
*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
#include "cs-etm-decoder.h"
#include "../util.h"
struct cs_etm_decoder
{
void *state;
int dummy;
};
int cs_etm_decoder__flush(struct cs_etm_decoder *decoder)
{
(void) decoder;
return -1;
}
int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *decoder, uint64_t offset, uint64_t address, uint64_t len, const char *fname)
{
(void) decoder;
(void) offset;
(void) address;
(void) len;
(void) fname;
return -1;
}
const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
uint64_t indx,
const uint8_t *buf,
size_t len,
size_t *consumed)
{
(void) decoder;
(void) indx;
(void) buf;
(void) len;
(void) consumed;
return NULL;
}
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder, uint64_t address, uint64_t len, cs_etm_mem_cb_type cb_func)
{
(void) decoder;
(void) address;
(void) len;
(void) cb_func;
return -1;
}
int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
struct cs_etm_packet *packet)
{
(void) decoder;
(void) packet;
return -1;
}
struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params t_params[])
{
(void) num_cpu;
(void) d_params;
(void) t_params;
return NULL;
}
void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
(void) decoder;
return;
}

View file

@ -0,0 +1,503 @@
/*
*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/err.h>
#include <stdlib.h>
#include "../cs-etm.h"
#include "cs-etm-decoder.h"
#include "../util.h"
#include "../util/intlist.h"
#include "c_api/opencsd_c_api.h"
#include "ocsd_if_types.h"
#include "etmv4/trc_pkt_types_etmv4.h"
#define MAX_BUFFER 1024
struct cs_etm_decoder
{
struct cs_etm_state state;
dcd_tree_handle_t dcd_tree;
void (*packet_printer)(const char *);
cs_etm_mem_cb_type mem_access;
ocsd_datapath_resp_t prev_return;
size_t prev_processed;
bool trace_on;
bool discontinuity;
struct cs_etm_packet packet_buffer[MAX_BUFFER];
uint32_t packet_count;
uint32_t head;
uint32_t tail;
uint32_t end_tail;
};
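
A brief note on the packet ring above: MAX_BUFFER is a power of two, so the helpers below advance the indices with a simple mask, for example:

/* end_tail = 1023: next slot = (1023 + 1) & (MAX_BUFFER - 1) = 0, wrapping to the start */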
static uint32_t cs_etm_decoder__mem_access(const void *context,
const ocsd_vaddr_t address,
const ocsd_mem_space_acc_t mem_space,
const uint32_t req_size,
uint8_t *buffer)
{
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
(void) mem_space;
return decoder->mem_access(decoder->state.data,address,req_size,buffer);
}
static int cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
ocsd_etmv4_cfg *config)
{
config->reg_configr = params->reg_configr;
config->reg_traceidr = params->reg_traceidr;
config->reg_idr0 = params->reg_idr0;
config->reg_idr1 = params->reg_idr1;
config->reg_idr2 = params->reg_idr2;
config->reg_idr8 = params->reg_idr8;
config->reg_idr9 = 0;
config->reg_idr10 = 0;
config->reg_idr11 = 0;
config->reg_idr12 = 0;
config->reg_idr13 = 0;
config->arch_ver = ARCH_V8;
config->core_prof = profile_CortexA;
return 0;
}
static int cs_etm_decoder__flush_packet(struct cs_etm_decoder *decoder)
{
int err = 0;
if (decoder == NULL) return -1;
if (decoder->packet_count >= 31) return -1;
if (decoder->tail != decoder->end_tail) {
decoder->tail = (decoder->tail + 1) & (MAX_BUFFER - 1);
decoder->packet_count++;
}
return err;
}
int cs_etm_decoder__flush(struct cs_etm_decoder *decoder)
{
return cs_etm_decoder__flush_packet(decoder);
}
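/*
 * Queue one generic trace element as a cs_etm_packet in the slot at
 * end_tail.  The originating CPU is recovered from the trace ID through the
 * traceid_list RB tree (trace ID -> CPU).
 */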
static int cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id,
enum cs_etm_sample_type sample_type)
{
int err = 0;
uint32_t et = 0;
struct int_node *inode = NULL;
if (decoder == NULL) return -1;
if (decoder->packet_count >= MAX_BUFFER - 1) return -1;
err = cs_etm_decoder__flush_packet(decoder);
if (err) return err;
et = decoder->end_tail;
/* Search the RB tree for the cpu associated with this traceID */
inode = intlist__find(traceid_list, trace_chan_id);
if (!inode)
return -1;
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].start_addr = elem->st_addr;
decoder->packet_buffer[et].end_addr = elem->en_addr;
decoder->packet_buffer[et].exc = false;
decoder->packet_buffer[et].exc_ret = false;
decoder->packet_buffer[et].cpu = *((int*)inode->priv);
et = (et + 1) & (MAX_BUFFER - 1);
decoder->end_tail = et;
return err;
}
static int cs_etm_decoder__mark_exception(struct cs_etm_decoder *decoder)
{
int err = 0;
if (decoder == NULL) return -1;
decoder->packet_buffer[decoder->end_tail].exc = true;
return err;
}
static int cs_etm_decoder__mark_exception_return(struct cs_etm_decoder *decoder)
{
int err = 0;
if (decoder == NULL) return -1;
decoder->packet_buffer[decoder->end_tail].exc_ret = true;
return err;
}
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const void *context,
const ocsd_trc_index_t indx,
const uint8_t trace_chan_id,
const ocsd_generic_trace_elem *elem)
{
ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
(void) indx;
(void) trace_chan_id;
switch (elem->elem_type) {
case OCSD_GEN_TRC_ELEM_UNKNOWN:
break;
case OCSD_GEN_TRC_ELEM_NO_SYNC:
decoder->trace_on = false;
break;
case OCSD_GEN_TRC_ELEM_TRACE_ON:
decoder->trace_on = true;
break;
//case OCSD_GEN_TRC_ELEM_TRACE_OVERFLOW:
//decoder->trace_on = false;
//decoder->discontinuity = true;
//break;
case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
cs_etm_decoder__buffer_packet(decoder,elem,
trace_chan_id, CS_ETM_RANGE);
resp = OCSD_RESP_WAIT;
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
cs_etm_decoder__mark_exception(decoder);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
cs_etm_decoder__mark_exception_return(decoder);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_ADDR_NACC:
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
//case OCSD_GEN_TRC_ELEM_TS_WITH_CC:
case OCSD_GEN_TRC_ELEM_EVENT:
default:
break;
}
decoder->state.err = 0;
return resp;
}
static ocsd_datapath_resp_t cs_etm_decoder__etmv4i_packet_printer(
const void *context,
const ocsd_datapath_op_t op,
const ocsd_trc_index_t indx,
const ocsd_etmv4_i_pkt *pkt)
{
const size_t PACKET_STR_LEN = 1024;
ocsd_datapath_resp_t ret = OCSD_RESP_CONT;
char packet_str[PACKET_STR_LEN];
size_t offset;
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
sprintf(packet_str,"%ld: ", (long int) indx);
offset = strlen(packet_str);
switch(op) {
case OCSD_OP_DATA:
if (ocsd_pkt_str(OCSD_PROTOCOL_ETMV4I,
(void *)pkt,
packet_str+offset,
PACKET_STR_LEN-offset) != OCSD_OK)
ret = OCSD_RESP_FATAL_INVALID_PARAM;
break;
case OCSD_OP_EOT:
sprintf(packet_str,"**** END OF TRACE ****\n");
break;
case OCSD_OP_FLUSH:
case OCSD_OP_RESET:
default:
break;
}
decoder->packet_printer(packet_str);
return ret;
}
static int cs_etm_decoder__create_etmv4i_packet_printer(struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params *t_params,
struct cs_etm_decoder *decoder)
{
ocsd_etmv4_cfg trace_config;
int ret = 0;
if (d_params->packet_printer == NULL)
return -1;
ret = cs_etm_decoder__gen_etmv4_config(t_params,&trace_config);
if (ret != 0)
return -1;
decoder->packet_printer = d_params->packet_printer;
ret = ocsd_dt_create_etmv4i_pkt_proc(decoder->dcd_tree,
&trace_config,
cs_etm_decoder__etmv4i_packet_printer,
decoder);
return ret;
}
static int cs_etm_decoder__create_etmv4i_packet_decoder(struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params *t_params,
struct cs_etm_decoder *decoder)
{
ocsd_etmv4_cfg trace_config;
int ret = 0;
decoder->packet_printer = d_params->packet_printer;
ret = cs_etm_decoder__gen_etmv4_config(t_params,&trace_config);
if (ret != 0)
return -1;
ret = ocsd_dt_create_etmv4i_decoder(decoder->dcd_tree,&trace_config);
if (ret != OCSD_OK)
return -1;
ret = ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
cs_etm_decoder__gen_trace_elem_printer, decoder);
return ret;
}
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder, uint64_t address, uint64_t len, cs_etm_mem_cb_type cb_func)
{
int err;
decoder->mem_access = cb_func;
err = ocsd_dt_add_callback_mem_acc(decoder->dcd_tree,
address,
address+len-1,
OCSD_MEM_SPACE_ANY,
cs_etm_decoder__mem_access,
decoder);
return err;
}
int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *decoder, uint64_t offset, uint64_t address, uint64_t len, const char *fname)
{
int err = 0;
file_mem_region_t region;
(void) len;
if (NULL == decoder)
return -1;
if (NULL == decoder->dcd_tree)
return -1;
region.file_offset = offset;
region.start_address = address;
region.region_size = len;
err = ocsd_dt_add_binfile_region_mem_acc(decoder->dcd_tree,
&region,
1,
OCSD_MEM_SPACE_ANY,
fname);
return err;
}
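/*
 * Push a block of raw trace at the decode tree.  The generic element
 * callback answers OCSD_RESP_WAIT once it has buffered a packet; in that
 * case the datapath is flushed, zero bytes are reported as consumed and the
 * real count is parked in prev_processed.  It is reported on a subsequent
 * call, after the caller has drained the ring with
 * cs_etm_decoder__get_packet().
 */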
const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
uint64_t indx,
const uint8_t *buf,
size_t len,
size_t *consumed)
{
int ret = 0;
ocsd_datapath_resp_t dp_ret = decoder->prev_return;
size_t processed = 0;
if (decoder->packet_count > 0) {
decoder->state.err = ret;
*consumed = processed;
return &(decoder->state);
}
while ((processed < len) && (0 == ret)) {
if (OCSD_DATA_RESP_IS_CONT(dp_ret)) {
uint32_t count;
dp_ret = ocsd_dt_process_data(decoder->dcd_tree,
OCSD_OP_DATA,
indx+processed,
len - processed,
&buf[processed],
&count);
processed += count;
} else if (OCSD_DATA_RESP_IS_WAIT(dp_ret)) {
dp_ret = ocsd_dt_process_data(decoder->dcd_tree,
OCSD_OP_FLUSH,
0,
0,
NULL,
NULL);
break;
} else {
ret = -1;
}
}
if (OCSD_DATA_RESP_IS_WAIT(dp_ret)) {
if (OCSD_DATA_RESP_IS_CONT(decoder->prev_return)) {
decoder->prev_processed = processed;
}
processed = 0;
} else if (OCSD_DATA_RESP_IS_WAIT(decoder->prev_return)) {
processed = decoder->prev_processed;
decoder->prev_processed = 0;
}
*consumed = processed;
decoder->prev_return = dp_ret;
decoder->state.err = ret;
return &(decoder->state);
}
int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
struct cs_etm_packet *packet)
{
if (decoder->packet_count == 0) return -1;
if (packet == NULL) return -1;
*packet = decoder->packet_buffer[decoder->head];
decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
decoder->packet_count--;
return 0;
}
static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
{
unsigned i;
decoder->head = 0;
decoder->tail = 0;
decoder->end_tail = 0;
decoder->packet_count = 0;
for (i = 0; i < MAX_BUFFER; i++) {
decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
decoder->packet_buffer[i].exc = false;
decoder->packet_buffer[i].exc_ret = false;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *d_params, struct cs_etm_trace_params t_params[])
{
struct cs_etm_decoder *decoder;
ocsd_dcd_tree_src_t format;
uint32_t flags;
int ret;
size_t i;
if ((t_params == NULL) || (d_params == NULL)) {
return NULL;
}
decoder = zalloc(sizeof(struct cs_etm_decoder));
if (decoder == NULL) {
return NULL;
}
decoder->state.data = d_params->data;
decoder->prev_return = OCSD_RESP_CONT;
cs_etm_decoder__clear_buffer(decoder);
format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
OCSD_TRC_SRC_SINGLE);
flags = 0;
flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
/* Create decode tree for the data source */
decoder->dcd_tree = ocsd_create_dcd_tree(format,flags);
if (decoder->dcd_tree == NULL) {
goto err_free_decoder;
}
for (i = 0; i < num_cpu; ++i) {
switch (t_params[i].protocol) {
case CS_ETM_PROTO_ETMV4i:
if (d_params->operation == CS_ETM_OPERATION_PRINT) {
ret = cs_etm_decoder__create_etmv4i_packet_printer(d_params,&t_params[i],decoder);
} else if (d_params->operation == CS_ETM_OPERATION_DECODE) {
ret = cs_etm_decoder__create_etmv4i_packet_decoder(d_params,&t_params[i],decoder);
} else {
ret = -CS_ETM_ERR_PARAM;
}
if (ret != 0) {
goto err_free_decoder_tree;
}
break;
default:
goto err_free_decoder_tree;
break;
}
}
return decoder;
err_free_decoder_tree:
ocsd_destroy_dcd_tree(decoder->dcd_tree);
err_free_decoder:
free(decoder);
return NULL;
}
void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
if (decoder == NULL) return;
ocsd_destroy_dcd_tree(decoder->dcd_tree);
decoder->dcd_tree = NULL;
free(decoder);
}

View file

@ -0,0 +1,118 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Tor Jeremiassen <tor.jeremiassen@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef INCLUDE__CS_ETM_DECODER_H__
#define INCLUDE__CS_ETM_DECODER_H__
#include <linux/types.h>
#include <stdio.h>
struct cs_etm_decoder;
struct cs_etm_buffer {
const unsigned char *buf;
size_t len;
uint64_t offset;
//bool consecutive;
uint64_t ref_timestamp;
//uint64_t trace_nr;
};
enum cs_etm_sample_type {
CS_ETM_RANGE = 1 << 0,
};
struct cs_etm_state {
int err;
void *data;
unsigned isa;
uint64_t start;
uint64_t end;
uint64_t timestamp;
};
struct cs_etm_packet {
enum cs_etm_sample_type sample_type;
uint64_t start_addr;
uint64_t end_addr;
bool exc;
bool exc_ret;
int cpu;
};
struct cs_etm_queue;
typedef uint32_t (*cs_etm_mem_cb_type)(struct cs_etm_queue *, uint64_t, size_t, uint8_t *);
struct cs_etm_trace_params {
void *etmv4i_packet_handler;
uint32_t reg_idr0;
uint32_t reg_idr1;
uint32_t reg_idr2;
uint32_t reg_idr8;
uint32_t reg_configr;
uint32_t reg_traceidr;
int protocol;
};
struct cs_etm_decoder_params {
int operation;
void (*packet_printer)(const char *);
cs_etm_mem_cb_type mem_acc_cb;
bool formatted;
bool fsyncs;
bool hsyncs;
bool frame_aligned;
void *data;
};
enum {
CS_ETM_PROTO_ETMV3 = 1,
CS_ETM_PROTO_ETMV4i,
CS_ETM_PROTO_ETMV4d,
};
enum {
CS_ETM_OPERATION_PRINT = 1,
CS_ETM_OPERATION_DECODE,
};
enum {
CS_ETM_ERR_NOMEM = 1,
CS_ETM_ERR_NODATA,
CS_ETM_ERR_PARAM,
};
struct cs_etm_decoder *cs_etm_decoder__new(uint32_t num_cpu, struct cs_etm_decoder_params *, struct cs_etm_trace_params []);
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *, uint64_t, uint64_t, cs_etm_mem_cb_type);
int cs_etm_decoder__flush(struct cs_etm_decoder *);
void cs_etm_decoder__free(struct cs_etm_decoder *);
int cs_etm_decoder__get_packet(struct cs_etm_decoder *, struct cs_etm_packet *);
int cs_etm_decoder__add_bin_file(struct cs_etm_decoder *, uint64_t, uint64_t, uint64_t, const char *);
const struct cs_etm_state *cs_etm_decoder__process_data_block(struct cs_etm_decoder *,
uint64_t,
const uint8_t *,
size_t,
size_t *);
#endif /* INCLUDE__CS_ETM_DECODER_H__ */
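A minimal caller-side sketch of this API, assuming the trace and decoder parameters have already been filled in from the recorded metadata; the function name and buffer handling below are illustrative, not part of the perf sources:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "cs-etm-decoder.h"

/* Illustrative only: decode one raw buffer and print the instruction ranges. */
static int decode_one_buffer(const uint8_t *buf, size_t len, uint32_t num_cpu,
			     struct cs_etm_trace_params *t_params)
{
	struct cs_etm_decoder_params d_params = {
		.operation	= CS_ETM_OPERATION_DECODE,
		.formatted	= true,
		.frame_aligned	= true,
	};
	struct cs_etm_decoder *decoder;
	struct cs_etm_packet packet;
	size_t consumed, pos = 0;

	decoder = cs_etm_decoder__new(num_cpu, &d_params, t_params);
	if (decoder == NULL)
		return -1;

	/* A real caller would also register instruction memory with
	 * cs_etm_decoder__add_bin_file() or cs_etm_decoder__add_mem_access_cb()
	 * so that OpenCSD can follow the instruction ranges.
	 */
	while (pos < len) {
		const struct cs_etm_state *state;

		state = cs_etm_decoder__process_data_block(decoder, pos,
							   buf + pos,
							   len - pos, &consumed);
		if (state == NULL || state->err)
			break;

		/* Drain buffered packets before feeding more data. */
		while (cs_etm_decoder__get_packet(decoder, &packet) == 0)
			printf("cpu %d: 0x%" PRIx64 "-0x%" PRIx64 "\n",
			       packet.cpu, packet.start_addr, packet.end_addr);

		pos += consumed;
	}

	cs_etm_decoder__free(decoder);
	return 0;
}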

1533
tools/perf/util/cs-etm.c Normal file

File diff suppressed because it is too large

84
tools/perf/util/cs-etm.h Normal file
View file

@ -0,0 +1,84 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
#define INCLUDE__UTIL_PERF_CS_ETM_H__
#include "util/event.h"
#include "util/intlist.h"
#include "util/session.h"
/* Versioning header in case things need to change in the future. That way
 * decoding of old snapshots is still possible.
*/
enum {
/* Starting with 0x0 */
CS_HEADER_VERSION_0,
/* PMU->type (32 bit), total # of CPUs (32 bit) */
CS_PMU_TYPE_CPUS,
CS_ETM_SNAPSHOT,
CS_HEADER_VERSION_0_MAX,
};
/* Beginning of header common to both ETMv3 and V4 */
enum {
CS_ETM_MAGIC,
CS_ETM_CPU,
};
/* ETMv3/PTM metadata */
enum {
/* Dynamic, configurable parameters */
CS_ETM_ETMCR = CS_ETM_CPU + 1,
CS_ETM_ETMTRACEIDR,
/* RO, taken from sysfs */
CS_ETM_ETMCCER,
CS_ETM_ETMIDR,
CS_ETM_PRIV_MAX,
};
/* ETMv4 metadata */
enum {
/* Dynamic, configurable parameters */
CS_ETMV4_TRCCONFIGR = CS_ETM_CPU + 1,
CS_ETMV4_TRCTRACEIDR,
/* RO, taken from sysfs */
CS_ETMV4_TRCIDR0,
CS_ETMV4_TRCIDR1,
CS_ETMV4_TRCIDR2,
CS_ETMV4_TRCIDR8,
CS_ETMV4_TRCAUTHSTATUS,
CS_ETMV4_PRIV_MAX,
};
/* RB tree for quick conversion between traceID and CPUs */
struct intlist *traceid_list;
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL;
static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
#endif
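A hedged sketch of how a reader might walk such a priv area, assuming the per-CPU metadata blocks follow the version-0 header directly and that the CPU count sits in the low 32 bits of CS_PMU_TYPE_CPUS; the actual packing is done by the cs-etm auxtrace record code, which is not shown here:

#include <stdio.h>
#include "cs-etm.h"

/* Illustrative only: print the per-CPU metadata of a version-0 payload. */
static void cs_etm__dump_priv(const u64 *priv)
{
	u32 nr_cpu = priv[CS_PMU_TYPE_CPUS] & 0xffffffff; /* assumed packing */
	const u64 *p = priv + CS_HEADER_VERSION_0_MAX;
	u32 i;

	for (i = 0; i < nr_cpu; i++) {
		if (p[CS_ETM_MAGIC] == __perf_cs_etmv4_magic) {
			printf("cpu %llu: ETMv4, TRCIDR0=%#llx\n",
			       (unsigned long long)p[CS_ETM_CPU],
			       (unsigned long long)p[CS_ETMV4_TRCIDR0]);
			p += CS_ETMV4_PRIV_MAX;
		} else if (p[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
			printf("cpu %llu: ETMv3/PTM, ETMIDR=%#llx\n",
			       (unsigned long long)p[CS_ETM_CPU],
			       (unsigned long long)p[CS_ETM_ETMIDR]);
			p += CS_ETM_PRIV_MAX;
		} else {
			break;	/* unknown magic, stop walking */
		}
	}
}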

View file

@ -1247,6 +1247,30 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e
return err;
}
int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
struct perf_evsel **err_evsel,
struct perf_evsel_config_term **err_term)
{
struct perf_evsel *evsel;
int err = 0;
const int ncpus = cpu_map__nr(evlist->cpus),
nthreads = thread_map__nr(evlist->threads);
evlist__for_each(evlist, evsel) {
if (list_empty(&evsel->drv_config_terms))
continue;
err = perf_evsel__apply_drv_configs(evsel, ncpus,
nthreads, err_term);
if (err) {
*err_evsel = evsel;
break;
}
}
return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
struct perf_evsel *evsel;

View file

@ -163,6 +163,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
struct perf_evsel **err_evsel,
struct perf_evsel_config_term **term);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

View file

@ -211,6 +211,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->bpf_fd = -1;
INIT_LIST_HEAD(&evsel->node);
INIT_LIST_HEAD(&evsel->config_terms);
INIT_LIST_HEAD(&evsel->drv_config_terms);
perf_evsel__object.init(evsel);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
perf_evsel__calc_id_pos(evsel);
@ -981,6 +982,27 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
return -1;
}
int perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
int ncpus, int nthreads,
struct perf_evsel_config_term **err_term)
{
int err = 0;
struct perf_evsel_config_term *term;
list_for_each_entry(term, &evsel->drv_config_terms, list) {
err = perf_evsel__run_ioctl(evsel, ncpus, nthreads,
PERF_EVENT_IOC_SET_DRV_CONFIGS,
(void *)term->val.drv_cfg);
if (err) {
*err_term = term;
break;
}
}
return err;
}
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
@ -1043,6 +1065,16 @@ static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
}
}
static void perf_evsel__free_drv_config_terms(struct perf_evsel *evsel)
{
struct perf_evsel_config_term *term, *h;
list_for_each_entry_safe(term, h, &evsel->drv_config_terms, list) {
list_del(&term->list);
free(term);
}
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
@ -1064,6 +1096,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
perf_evsel__free_drv_config_terms(evsel);
close_cgroup(evsel->cgrp);
cpu_map__put(evsel->cpus);
cpu_map__put(evsel->own_cpus);

View file

@ -44,6 +44,7 @@ enum {
PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
PERF_EVSEL__CONFIG_TERM_STACK_USER,
PERF_EVSEL__CONFIG_TERM_INHERIT,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_MAX,
};
@ -55,6 +56,7 @@ struct perf_evsel_config_term {
u64 freq;
bool time;
char *callgraph;
char *drv_cfg;
u64 stack_user;
bool inherit;
} val;
@ -75,6 +77,7 @@ struct perf_evsel_config_term {
* PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
* is used there is an id sample appended to non-sample events
* @priv: And what is in its containing unnamed union are tool specific
* @drv_config_terms: List of configurables sent directly to the PMU driver
*/
struct perf_evsel {
struct list_head node;
@ -123,6 +126,7 @@ struct perf_evsel {
char *group_name;
bool cmdline_group_boundary;
struct list_head config_terms;
struct list_head drv_config_terms;
int bpf_fd;
};
@ -227,6 +231,9 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
const char *op, const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter);
int perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
int ncpus, int nthreads,
struct perf_evsel_config_term **err_term);
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__disable(struct perf_evsel *evsel);

View file

@ -1,3 +1,4 @@
#include "build-id.h"
#include "callchain.h"
#include "debug.h"
#include "event.h"
@ -685,8 +686,16 @@ static struct dso *machine__get_kernel(struct machine *machine)
DSO_TYPE_GUEST_KERNEL);
}
if (kernel != NULL && (!kernel->has_build_id))
dso__read_running_kernel_build_id(kernel, machine);
if (kernel != NULL && (!kernel->has_build_id)) {
if (symbol_conf.vmlinux_name != NULL) {
filename__read_build_id(symbol_conf.vmlinux_name,
kernel->build_id,
sizeof(kernel->build_id));
kernel->has_build_id = 1;
} else {
dso__read_running_kernel_build_id(kernel, machine);
}
}
return kernel;
}
@ -700,8 +709,19 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
{
if (machine__is_default_guest(machine))
scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
else
scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
else {
if (symbol_conf.vmlinux_name != NULL) {
unsigned char build_id[BUILD_ID_SIZE];
char build_id_hex[SBUILD_ID_SIZE];
filename__read_build_id(symbol_conf.vmlinux_name,
build_id,
sizeof(build_id));
build_id__sprintf(build_id,sizeof(build_id), build_id_hex);
build_id__filename((char *)build_id_hex,buf,bufsz);
} else {
scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
}
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
@ -710,7 +730,7 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
* Returns the name of the start symbol in *symbol_name. Pass in NULL as
* symbol_name if it's not that important.
*/
static u64 machine__get_running_kernel_start(struct machine *machine,
static u64 machine__get_kallsyms_kernel_start(struct machine *machine,
const char **symbol_name)
{
char filename[PATH_MAX];
@ -738,7 +758,7 @@ static u64 machine__get_running_kernel_start(struct machine *machine,
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
u64 start = machine__get_running_kernel_start(machine, NULL);
u64 start = machine__get_kallsyms_kernel_start(machine, NULL);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
@ -1083,7 +1103,8 @@ int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name;
u64 addr = machine__get_running_kernel_start(machine, &name);
u64 addr = machine__get_kallsyms_kernel_start(machine, &name);
if (!addr)
return -1;

View file

@ -285,7 +285,8 @@ static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
char *name, struct cpu_map *cpus,
struct list_head *config_terms)
struct list_head *config_terms,
struct list_head *drv_config_terms)
{
struct perf_evsel *evsel;
@ -304,6 +305,9 @@ __add_event(struct list_head *list, int *idx,
if (config_terms)
list_splice(config_terms, &evsel->config_terms);
if (drv_config_terms)
list_splice(drv_config_terms, &evsel->drv_config_terms);
list_add_tail(&evsel->node, list);
return evsel;
}
@ -312,7 +316,8 @@ static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name,
struct list_head *config_terms)
{
return __add_event(list, idx, attr, name, NULL, config_terms) ? 0 : -ENOMEM;
return __add_event(list, idx, attr, name,
NULL, config_terms, NULL) ? 0 : -ENOMEM;
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@ -823,7 +828,8 @@ static int config_term_pmu(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
{
if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER)
if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
/*
* Always succeed for sysfs terms, as we don't know
* at this point what type they need to have.
@ -869,10 +875,7 @@ static int config_attr(struct perf_event_attr *attr,
return 0;
}
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __name, __val) \
#define ADD_CONFIG_TERM(__type, __name, __val, __head_terms) \
do { \
struct perf_evsel_config_term *__t; \
\
@ -883,33 +886,43 @@ do { \
INIT_LIST_HEAD(&__t->list); \
__t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
__t->val.__name = __val; \
list_add_tail(&__t->list, head_terms); \
list_add_tail(&__t->list, __head_terms); \
} while (0)
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused)
{
struct parse_events_term *term;
list_for_each_entry(term, head_config, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
ADD_CONFIG_TERM(PERIOD, period, term->val.num);
ADD_CONFIG_TERM(PERIOD, period,
term->val.num, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
ADD_CONFIG_TERM(FREQ, freq, term->val.num);
ADD_CONFIG_TERM(FREQ, freq,
term->val.num, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
ADD_CONFIG_TERM(TIME, time, term->val.num);
ADD_CONFIG_TERM(TIME, time,
term->val.num, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str);
ADD_CONFIG_TERM(CALLGRAPH, callgraph,
term->val.str, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
ADD_CONFIG_TERM(STACK_USER, stack_user,
term->val.num, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_INHERIT:
ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0);
ADD_CONFIG_TERM(INHERIT, inherit,
term->val.num ? 1 : 0, head_terms);
break;
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
ADD_CONFIG_TERM(INHERIT, inherit,
term->val.num ? 0 : 1, head_terms);
break;
default:
break;
@ -919,6 +932,21 @@ do { \
return 0;
}
static int get_drv_config_terms(struct list_head *head_config,
struct list_head *head_terms)
{
struct parse_events_term *term;
list_for_each_entry(term, head_config, list) {
if (term->type_term != PARSE_EVENTS__TERM_TYPE_DRV_CFG)
continue;
ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str, head_terms);
}
return 0;
}
int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event,
struct parse_events_error *err,
@ -989,6 +1017,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
struct perf_pmu *pmu;
struct perf_evsel *evsel;
LIST_HEAD(config_terms);
LIST_HEAD(drv_config_terms);
pmu = perf_pmu__find(name);
if (!pmu)
@ -1003,7 +1032,8 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
if (!head_config) {
attr.type = pmu->type;
evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL);
evsel = __add_event(list, &data->idx, &attr,
NULL, pmu->cpus, NULL, NULL);
return evsel ? 0 : -ENOMEM;
}
@ -1020,12 +1050,15 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
if (get_drv_config_terms(head_config, &drv_config_terms))
return -ENOMEM;
if (perf_pmu__config(pmu, &attr, head_config, data->error))
return -EINVAL;
evsel = __add_event(list, &data->idx, &attr,
pmu_event_name(head_config), pmu->cpus,
&config_terms);
&config_terms, &drv_config_terms);
if (evsel) {
evsel->unit = info.unit;
evsel->scale = info.scale;

View file

@ -68,7 +68,8 @@ enum {
PARSE_EVENTS__TERM_TYPE_CALLGRAPH,
PARSE_EVENTS__TERM_TYPE_STACKSIZE,
PARSE_EVENTS__TERM_TYPE_NOINHERIT,
PARSE_EVENTS__TERM_TYPE_INHERIT
PARSE_EVENTS__TERM_TYPE_INHERIT,
PARSE_EVENTS__TERM_TYPE_DRV_CFG,
};
struct parse_events_term {

View file

@ -53,6 +53,16 @@ static int str(yyscan_t scanner, int token)
return token;
}
static int drv_str(yyscan_t scanner, int token)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
char *text = parse_events_get_text(scanner);
/* Strip off the '@' */
yylval->str = strdup(text + 1);
return token;
}
#define REWIND(__alloc) \
do { \
YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \
@ -123,6 +133,7 @@ num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/* If you add a modifier you need to update check_modifier() */
modifier_event [ukhpPGHSDI]+
modifier_bp [rwx]{1,3}
@ -196,6 +207,7 @@ no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
, { return ','; }
"/" { BEGIN(INITIAL); return '/'; }
{name_minus} { return str(yyscanner, PE_NAME); }
@{drv_cfg_term} { return drv_str(yyscanner, PE_DRV_CFG_TERM); }
}
<mem>{

View file

@ -48,6 +48,7 @@ static inc_group_count(struct list_head *list,
%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
%token PE_ERROR
%token PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
%token PE_DRV_CFG_TERM
%type <num> PE_VALUE
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
@ -62,6 +63,7 @@ static inc_group_count(struct list_head *list,
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
%type <str> PE_PMU_EVENT_PRE PE_PMU_EVENT_SUF PE_KERNEL_PMU_EVENT
%type <str> PE_DRV_CFG_TERM
%type <num> value_sym
%type <head> event_config
%type <term> event_term
@ -573,6 +575,15 @@ PE_TERM
ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL));
$$ = term;
}
|
PE_DRV_CFG_TERM
{
struct parse_events_term *term;
ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_DRV_CFG,
$1, $1, &@1, NULL));
$$ = term;
}
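Taken together with the lexer rule above, terms introduced with '@' become PARSE_EVENTS__TERM_TYPE_DRV_CFG entries that end up on evsel->drv_config_terms and are pushed to the PMU driver via PERF_EVENT_IOC_SET_DRV_CONFIGS. A rough sketch of that flow, where the event string, the sink name, the include paths and the error handling are illustrative assumptions only:

#include <errno.h>
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"

/* Illustrative only: parse an event with an '@' driver config term. */
static int open_with_drv_config(void)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *bad_evsel;
	struct perf_evsel_config_term *bad_term;
	struct parse_events_error perr = { .idx = 0 };

	if (evlist == NULL)
		return -ENOMEM;

	/* "@sink=..." is lexed as PE_DRV_CFG_TERM and stored as a DRV_CFG term. */
	if (parse_events(evlist, "cs_etm/@sink=20070000.etr/u", &perr))
		return -EINVAL;

	/* After the events have been opened, hand the terms to the driver. */
	return perf_evlist__apply_drv_configs(evlist, &bad_evsel, &bad_term);
}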
sep_dc: ':' |

View file

@ -806,6 +806,8 @@ static void python_process_general_event(struct perf_sample *sample,
PyInt_FromLong(sample->cpu));
pydict_set_item_string_decref(dict_sample, "ip",
PyLong_FromUnsignedLongLong(sample->ip));
pydict_set_item_string_decref(dict_sample, "addr",
PyLong_FromUnsignedLongLong(sample->addr));
pydict_set_item_string_decref(dict_sample, "time",
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",

View file

@ -344,7 +344,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
if (ret >= 0)
dso->is_64_bit = ret;
if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) {
if ((!dso->has_build_id) && (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0)) {
dso__set_build_id(dso, build_id);
}
return 0;

View file

@ -1465,7 +1465,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
if ((!dso->has_build_id) &&
(filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0))
dso__set_build_id(dso, build_id);
/*