perf_counter: move PERF_RECORD_TIME

Move PERF_RECORD_TIME so that all the fixed-length items come before
the variable-length ones.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.307926436@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
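
With the bits reordered, perf_counter_output() emits the fixed-size sample fields
(ip, pid/tid, time) back to back right behind the event header, and only then the
variable-length group and callchain data, so a consumer can read everything of known
size before it has to interpret any counts. A minimal consumer-side sketch of that
layout, assuming a counter that requested all five record bits; parse_sample() and
the stand-in header struct below are illustrative, not part of the kernel interface:

    #include <stdint.h>
    #include <string.h>

    /* stand-in for struct perf_event_header: { u32 type; u16 misc; u16 size; } */
    struct sample_header { uint32_t type; uint16_t misc; uint16_t size; };

    /* hypothetical consumer: walk one sample whose counter set all five bits */
    static void parse_sample(const struct sample_header *hdr)
    {
        const unsigned char *p = (const unsigned char *)(hdr + 1);
        uint64_t ip, time, nr;
        uint32_t pid, tid;

        memcpy(&ip,   p, 8); p += 8;    /* PERF_RECORD_IP                      */
        memcpy(&pid,  p, 4); p += 4;    /* PERF_RECORD_TID                     */
        memcpy(&tid,  p, 4); p += 4;
        memcpy(&time, p, 8); p += 8;    /* PERF_RECORD_TIME, now a fixed field */

        memcpy(&nr,   p, 8); p += 8;    /* PERF_RECORD_GROUP: nr pairs of...   */
        p += nr * 2 * sizeof(uint64_t); /*   ...{ u64 event, val; }            */

        /* only PERF_RECORD_CALLCHAIN data (nr, context words, ips[nr]) remains */
        (void)ip; (void)pid; (void)tid; (void)time; (void)p;
    }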
Authored by Peter Zijlstra on 2009-04-08 15:01:32 +02:00, committed by Ingo Molnar
parent de9ac07bbf
commit 4d855457d8
2 changed files with 17 additions and 18 deletions

include/linux/perf_counter.h

@@ -100,9 +100,9 @@ enum sw_event_ids {
 enum perf_counter_record_format {
 	PERF_RECORD_IP		= 1U << 0,
 	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_GROUP	= 1U << 2,
-	PERF_RECORD_CALLCHAIN	= 1U << 3,
-	PERF_RECORD_TIME	= 1U << 4,
+	PERF_RECORD_TIME	= 1U << 2,
+	PERF_RECORD_GROUP	= 1U << 3,
+	PERF_RECORD_CALLCHAIN	= 1U << 4,
 };
 
 /*
@@ -250,6 +250,7 @@ enum perf_event_type {
 	 *
 	 * { u64		ip;	  } && PERF_RECORD_IP
 	 * { u32		pid, tid; } && PERF_RECORD_TID
+	 * { u64		time;	  } && PERF_RECORD_TIME
 	 *
 	 * { u64		nr;
 	 *   { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
@@ -259,8 +260,6 @@ enum perf_event_type {
 	 *	  kernel,
 	 *	  user;
 	 *   u64		ips[nr];  } && PERF_RECORD_CALLCHAIN
-	 *
-	 * { u64		time;	  } && PERF_RECORD_TIME
 	 * };
 	 */
 };

kernel/perf_counter.c

@@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(tid_entry);
 	}
 
+	if (record_type & PERF_RECORD_TIME) {
+		/*
+		 * Maybe do better on x86 and provide cpu_clock_nmi()
+		 */
+		time = sched_clock();
+
+		header.type |= PERF_RECORD_TIME;
+		header.size += sizeof(u64);
+	}
+
 	if (record_type & PERF_RECORD_GROUP) {
 		header.type |= PERF_RECORD_GROUP;
 		header.size += sizeof(u64) +
@@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter,
 		}
 	}
 
-	if (record_type & PERF_RECORD_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		time = sched_clock();
-
-		header.type |= PERF_RECORD_TIME;
-		header.size += sizeof(u64);
-	}
-
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_TID)
 		perf_output_put(&handle, tid_entry);
 
+	if (record_type & PERF_RECORD_TIME)
+		perf_output_put(&handle, time);
+
 	if (record_type & PERF_RECORD_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
@@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (callchain)
 		perf_output_copy(&handle, callchain, callchain_size);
 
-	if (record_type & PERF_RECORD_TIME)
-		perf_output_put(&handle, time);
-
 	perf_output_end(&handle);
 }
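
The two kernel/perf_counter.c hunks move as a pair because perf_counter_output() makes
two passes over the same record_type bits: one to grow header.size before reserving
buffer space, and one to write the fields with perf_output_put() in the identical
order, which is what keeps the bytes in the ring buffer in sync with the layout
comment in perf_counter.h. A condensed sketch of that shape, showing only the time
field (abbreviated from the hunks above, not the full function body):

    /* pass 1: size the record; time is sampled once and reused below */
    if (record_type & PERF_RECORD_TIME) {
        time = sched_clock();
        header.type |= PERF_RECORD_TIME;
        header.size += sizeof(u64);
    }

    ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
    if (ret)
        return;

    /* pass 2: emit fields in declaration order -- IP, TID, TIME, GROUP, CALLCHAIN */
    if (record_type & PERF_RECORD_TIME)
        perf_output_put(&handle, time);

    perf_output_end(&handle);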