android_kernel_oneplus_msm8998/include/trace/events/sched.h
Patrick Bellasi f0ba6a5d0c FIXUP: sched: fix build for non-SMP target
The build for a single-core (e.g. user-mode) Linux kernel is currently
broken, and this configuration is required (at least) to run some network
tests.

The main issues with the current code on single-core systems are:
1. {se,rq}::sched_avg is neither available nor maintained for !SMP systems.
   This means that load and utilisation signals are NOT available on
   single-core systems, and all the EAS code depends on these signals.
2. sched_group_energy is also SMP-dependent. Again, this means that all the
   EAS setup and preparation code (energy model initialisation) has to be
   properly guarded/disabled for !SMP systems.
3. SchedFreq depends on the utilisation signal, which is not available on
   !SMP systems.
4. SchedTune is useless on single-core systems if SchedFreq is not available.
5. The WALT machinery is not required on single-core systems.

This patch addresses all these issues by enforcing some constraints for
single-core systems:
a) WALT, SchedTune and SchedFreq are now dependent on SMP
b) The default governor for !SMP systems is INTERACTIVE
c) The energy model initialisation/build functions are compiled only for
   SMP systems
d) Other minor code re-arrangements and CONFIG_SMP guarding to enable
   single-core builds (see the sketch below)
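
As an illustration of (a) and (d), here is a minimal sketch (not the
literal patch; the function name init_sched_energy and its placement are
assumptions): on the Kconfig side the features gain a plain
"depends on SMP", while on the C side SMP-only setup is stubbed out with
an inline no-op so that !SMP call sites still compile:

  /* SMP builds get the real energy-model setup ... */
  #ifdef CONFIG_SMP
  extern void init_sched_energy(void);
  #else
  /* ... while !SMP builds compile against a no-op stub. */
  static inline void init_sched_energy(void) { }
  #endif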

Signed-off-by: Patrick Bellasi <patrick.bellasi@arm.com>
2016-09-14 14:59:32 +05:30

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
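/*
* Every event below follows the same macro anatomy: TP_PROTO gives the C
* prototype of the trace_<event>() call, TP_ARGS names its arguments,
* TP_STRUCT__entry lists the fields recorded into the ring buffer,
* TP_fast_assign fills those fields at the probe site, and TP_printk
* formats them for human-readable output.
*/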
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
__entry->pid = t->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
/*
* Tracepoint for waking up a task:
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(__perf_task(p)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
__entry->target_cpu)
);
/*
* Tracepoint called when waking a task; this tracepoint is guaranteed to be
* called from the waking context.
*/
DEFINE_EVENT(sched_wakeup_template, sched_waking,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
* It is not always called from the waking context.
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
/*
* Preemption ignores task state, therefore preempted tasks are always
* RUNNING (we will not have dequeued if state != RUNNING).
*/
return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */
/*
* Tracepoint for task switches, performed by the scheduler:
*/
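/*
* The prev_state letters printed below follow the conventional task-state
* codes: S=interruptible sleep, D=uninterruptible sleep, T=stopped,
* t=traced, Z=zombie, X=dead, x=task dead, K=wakekill, W=waking, P=parked,
* N=no-load; "R" means TASK_RUNNING. A trailing "+" marks a task that was
* preempted while runnable (TASK_STATE_MAX is used as the marker bit).
*/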
TRACE_EVENT(sched_switch,
TP_PROTO(bool preempt,
struct task_struct *prev,
struct task_struct *next),
TP_ARGS(preempt, prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
__field( long, prev_state )
__array( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
),
TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
__entry->prev_state = __trace_sched_switch_state(preempt, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
),
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->prev_state & (TASK_STATE_MAX-1) ?
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "K" }, { 256, "W" }, { 512, "P" },
{ 1024, "N" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
/*
* Tracepoint for a CPU going offline/online:
*/
TRACE_EVENT(sched_cpu_hotplug,
TP_PROTO(int affected_cpu, int error, int status),
TP_ARGS(affected_cpu, error, status),
TP_STRUCT__entry(
__field( int, affected_cpu )
__field( int, error )
__field( int, status )
),
TP_fast_assign(
__entry->affected_cpu = affected_cpu;
__entry->error = error;
__entry->status = status;
),
TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
__entry->status ? "online" : "offline", __entry->error)
);
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_pid )
__array( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_pid )
),
TP_fast_assign(
memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
__entry->parent_pid = parent->pid;
memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);
/*
* Tracepoint for exec:
*/
TRACE_EVENT(sched_process_exec,
TP_PROTO(struct task_struct *p, pid_t old_pid,
struct linux_binprm *bprm),
TP_ARGS(p, old_pid, bprm),
TP_STRUCT__entry(
__string( filename, bprm->filename )
__field( pid_t, pid )
__field( pid_t, old_pid )
),
TP_fast_assign(
__assign_str(filename, bprm->filename);
__entry->pid = p->pid;
__entry->old_pid = old_pid;
),
TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
__entry->pid, __entry->old_pid)
);
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, delay )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting blocked time (time the task spends in
* uninterruptible sleep).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for recording the cause of uninterruptible sleep.
*/
TRACE_EVENT(sched_blocked_reason,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk),
TP_STRUCT__entry(
__field( pid_t, pid )
__field( void*, caller )
__field( bool, io_wait )
),
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->caller = (void*)get_wchan(tsk);
__entry->io_wait = tsk->in_iowait;
),
TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, __perf_count(runtime), vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime));
/*
* Tracepoint for showing priority inheritance modifying a task's
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = newprio;
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->pid,
__entry->oldprio, __entry->newprio)
);
#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
TP_PROTO(struct task_struct *tsk),
TP_ARGS(tsk),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
DECLARE_EVENT_CLASS(sched_move_task_template,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu),
TP_STRUCT__entry(
__field( pid_t, pid )
__field( pid_t, tgid )
__field( pid_t, ngid )
__field( int, src_cpu )
__field( int, src_nid )
__field( int, dst_cpu )
__field( int, dst_nid )
),
TP_fast_assign(
__entry->pid = task_pid_nr(tsk);
__entry->tgid = task_tgid_nr(tsk);
__entry->ngid = task_numa_group_id(tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
__entry->dst_cpu = dst_cpu;
__entry->dst_nid = cpu_to_node(dst_cpu);
),
TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
__entry->pid, __entry->tgid, __entry->ngid,
__entry->src_cpu, __entry->src_nid,
__entry->dst_cpu, __entry->dst_nid)
);
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes.
*/
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu)
);
DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
TP_ARGS(tsk, src_cpu, dst_cpu)
);
TRACE_EVENT(sched_swap_numa,
TP_PROTO(struct task_struct *src_tsk, int src_cpu,
struct task_struct *dst_tsk, int dst_cpu),
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
TP_STRUCT__entry(
__field( pid_t, src_pid )
__field( pid_t, src_tgid )
__field( pid_t, src_ngid )
__field( int, src_cpu )
__field( int, src_nid )
__field( pid_t, dst_pid )
__field( pid_t, dst_tgid )
__field( pid_t, dst_ngid )
__field( int, dst_cpu )
__field( int, dst_nid )
),
TP_fast_assign(
__entry->src_pid = task_pid_nr(src_tsk);
__entry->src_tgid = task_tgid_nr(src_tsk);
__entry->src_ngid = task_numa_group_id(src_tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
__entry->dst_pid = task_pid_nr(dst_tsk);
__entry->dst_tgid = task_tgid_nr(dst_tsk);
__entry->dst_ngid = task_numa_group_id(dst_tsk);
__entry->dst_cpu = dst_cpu;
__entry->dst_nid = cpu_to_node(dst_cpu);
),
TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
__entry->src_cpu, __entry->src_nid,
__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
__entry->dst_cpu, __entry->dst_nid)
);
/*
* Tracepoint for waking a polling cpu without an IPI.
*/
TRACE_EVENT(sched_wake_idle_without_ipi,
TP_PROTO(int cpu),
TP_ARGS(cpu),
TP_STRUCT__entry(
__field( int, cpu )
),
TP_fast_assign(
__entry->cpu = cpu;
),
TP_printk("cpu=%d", __entry->cpu)
);
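/*
* Tracepoint for the frequency- and cpu-invariance scale factors applied
* while accumulating load-tracking contributions.
*/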
TRACE_EVENT(sched_contrib_scale_f,
TP_PROTO(int cpu, unsigned long freq_scale_factor,
unsigned long cpu_scale_factor),
TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
TP_STRUCT__entry(
__field(int, cpu)
__field(unsigned long, freq_scale_factor)
__field(unsigned long, cpu_scale_factor)
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->freq_scale_factor = freq_scale_factor;
__entry->cpu_scale_factor = cpu_scale_factor;
),
TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
__entry->cpu, __entry->freq_scale_factor,
__entry->cpu_scale_factor)
);
#ifdef CONFIG_SMP
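/*
* The events below read {se,cfs_rq}::sched_avg and the EAS/SchedTune
* machinery, neither of which exists on !SMP kernels; hence the
* CONFIG_SMP guard (see the commit message above).
*/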
/*
* Tracepoint for accounting sched averages for tasks.
*/
TRACE_EVENT(sched_load_avg_task,
TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
TP_ARGS(tsk, avg),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, cpu )
__field( unsigned long, load_avg )
__field( unsigned long, util_avg )
__field( u64, load_sum )
__field( u32, util_sum )
__field( u32, period_contrib )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->cpu = task_cpu(tsk);
__entry->load_avg = avg->load_avg;
__entry->util_avg = avg->util_avg;
__entry->load_sum = avg->load_sum;
__entry->util_sum = avg->util_sum;
__entry->period_contrib = avg->period_contrib;
),
TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
" util_sum=%u period_contrib=%u",
__entry->comm,
__entry->pid,
__entry->cpu,
__entry->load_avg,
__entry->util_avg,
(u64)__entry->load_sum,
(u32)__entry->util_sum,
(u32)__entry->period_contrib)
);
/*
* Tracepoint for accounting sched averages for cpus.
*/
TRACE_EVENT(sched_load_avg_cpu,
TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
TP_ARGS(cpu, cfs_rq),
TP_STRUCT__entry(
__field( int, cpu )
__field( unsigned long, load_avg )
__field( unsigned long, util_avg )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->load_avg = cfs_rq->avg.load_avg;
__entry->util_avg = cfs_rq->avg.util_avg;
),
TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
__entry->cpu, __entry->load_avg, __entry->util_avg)
);
/*
* Tracepoint for sched_tune_config settings
*/
TRACE_EVENT(sched_tune_config,
TP_PROTO(int boost),
TP_ARGS(boost),
TP_STRUCT__entry(
__field( int, boost )
),
TP_fast_assign(
__entry->boost = boost;
),
TP_printk("boost=%d ", __entry->boost)
);
/*
* Tracepoint for accounting CPU boosted utilization
*/
TRACE_EVENT(sched_boost_cpu,
TP_PROTO(int cpu, unsigned long util, unsigned long margin),
TP_ARGS(cpu, util, margin),
TP_STRUCT__entry(
__field( int, cpu )
__field( unsigned long, util )
__field( unsigned long, margin )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->util = util;
__entry->margin = margin;
),
TP_printk("cpu=%d util=%lu margin=%lu",
__entry->cpu,
__entry->util,
__entry->margin)
);
/*
* Tracepoint for schedtune_tasks_update
*/
TRACE_EVENT(sched_tune_tasks_update,
TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
unsigned int boost, unsigned int max_boost),
TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, cpu )
__field( int, tasks )
__field( int, idx )
__field( unsigned int, boost )
__field( unsigned int, max_boost )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->cpu = cpu;
__entry->tasks = tasks;
__entry->idx = idx;
__entry->boost = boost;
__entry->max_boost = max_boost;
),
TP_printk("pid=%d comm=%s "
"cpu=%d tasks=%d idx=%d boost=%u max_boost=%u",
__entry->pid, __entry->comm,
__entry->cpu, __entry->tasks, __entry->idx,
__entry->boost, __entry->max_boost)
);
/*
* Tracepoint for schedtune_boostgroup_update
*/
TRACE_EVENT(sched_tune_boostgroup_update,
TP_PROTO(int cpu, int variation, int max_boost),
TP_ARGS(cpu, variation, max_boost),
TP_STRUCT__entry(
__field( int, cpu )
__field( int, variation )
__field( int, max_boost )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->variation = variation;
__entry->max_boost = max_boost;
),
TP_printk("cpu=%d variation=%d max_boost=%d",
__entry->cpu, __entry->variation, __entry->max_boost)
);
/*
* Tracepoint for accounting task boosted utilization
*/
TRACE_EVENT(sched_boost_task,
TP_PROTO(struct task_struct *tsk, unsigned long util, unsigned long margin),
TP_ARGS(tsk, util, margin),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( unsigned long, util )
__field( unsigned long, margin )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->util = util;
__entry->margin = margin;
),
TP_printk("comm=%s pid=%d util=%lu margin=%lu",
__entry->comm, __entry->pid,
__entry->util,
__entry->margin)
);
/*
* Tracepoint for accounting sched group energy
*/
TRACE_EVENT(sched_energy_diff,
TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
int nrgn, int nrgp),
TP_ARGS(tsk, scpu, dcpu, udelta,
nrgb, nrga, nrgd, capb, capa, capd,
nrgn, nrgp),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, scpu )
__field( int, dcpu )
__field( int, udelta )
__field( int, nrgb )
__field( int, nrga )
__field( int, nrgd )
__field( int, capb )
__field( int, capa )
__field( int, capd )
__field( int, nrgn )
__field( int, nrgp )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->scpu = scpu;
__entry->dcpu = dcpu;
__entry->udelta = udelta;
__entry->nrgb = nrgb;
__entry->nrga = nrga;
__entry->nrgd = nrgd;
__entry->capb = capb;
__entry->capa = capa;
__entry->capd = capd;
__entry->nrgn = nrgn;
__entry->nrgp = nrgp;
),
TP_printk("pid=%d comm=%s "
"src_cpu=%d dst_cpu=%d usage_delta=%d "
"nrg_before=%d nrg_after=%d nrg_diff=%d "
"cap_before=%d cap_after=%d cap_delta=%d "
"nrg_delta=%d nrg_payoff=%d",
__entry->pid, __entry->comm,
__entry->scpu, __entry->dcpu, __entry->udelta,
__entry->nrgb, __entry->nrga, __entry->nrgd,
__entry->capb, __entry->capa, __entry->capd,
__entry->nrgn, __entry->nrgp)
);
/*
* Tracepoint for the schedtune energy/capacity filter decisions
*/
TRACE_EVENT(sched_tune_filter,
TP_PROTO(int nrg_delta, int cap_delta,
int nrg_gain, int cap_gain,
int payoff, int region),
TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
TP_STRUCT__entry(
__field( int, nrg_delta )
__field( int, cap_delta )
__field( int, nrg_gain )
__field( int, cap_gain )
__field( int, payoff )
__field( int, region )
),
TP_fast_assign(
__entry->nrg_delta = nrg_delta;
__entry->cap_delta = cap_delta;
__entry->nrg_gain = nrg_gain;
__entry->cap_gain = cap_gain;
__entry->payoff = payoff;
__entry->region = region;
),
TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
__entry->nrg_delta, __entry->cap_delta,
__entry->nrg_gain, __entry->cap_gain,
__entry->payoff, __entry->region)
);
#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
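
For reference, a minimal sketch of how this header is consumed; the file
named is the mainline convention (kernel/sched/core.c) and the call site
shown is illustrative:

  /* Exactly one translation unit defines CREATE_TRACE_POINTS before the
   * include, so that <trace/define_trace.h> emits the event bodies: */
  #define CREATE_TRACE_POINTS
  #include <trace/events/sched.h>

  /* Call sites elsewhere simply fire the event; when the event is
   * disabled this reduces to a static-branch check: */
  trace_sched_migrate_task(p, new_cpu);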