From bf863e333ff3704543c1300f837058b33c7bcb46 Mon Sep 17 00:00:00 2001 From: Srivatsa Vaddagiri Date: Mon, 31 Mar 2014 18:21:26 -0700 Subject: [PATCH] sched: Provide scaled load information for tasks in /proc Extend "sched" file in /proc for every task to provide information on scaled load statistics and percentage-scaled based load (load_avg) for a task. This will be a valuable debugging aid. Change-Id: I6ee0394b409c77c7f79f5b9ac560da03dc879758 Signed-off-by: Srivatsa Vaddagiri --- kernel/sched/debug.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index a89496c171c6..02319b46ad6c 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -571,6 +571,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m) void proc_sched_show_task(struct task_struct *p, struct seq_file *m) { unsigned long nr_switches; + unsigned int load_avg; + + load_avg = pct_task_load(p); SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p), get_nr_threads(p)); @@ -622,6 +625,14 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.statistics.nr_wakeups_passive); P(se.statistics.nr_wakeups_idle); +#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) + __P(load_avg); +#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP) + P(ravg.demand); + P(se.avg.runnable_avg_sum_scaled); +#endif +#endif + { u64 avg_atom, avg_per_cpu;