From 97fe3984e9045713edec172324ea865485c00eee Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Thu, 20 Sep 2018 15:31:36 +0530
Subject: [PATCH] sched/walt: Fix the memory leak of idle task load pointers

The memory for task load pointers is allocated twice for each idle
thread except for the boot CPU. This happens during boot from
idle_threads_init()->idle_init() in the following 2 paths.

1. idle_init()->fork_idle()->copy_process()->
   sched_fork()->init_new_task_load()

2. idle_init()->fork_idle()->
   init_idle()->init_new_task_load()

The memory allocation for all tasks happens through the 1st path, so
use the same for idle tasks and kill the 2nd path. Since the idle
thread of the boot CPU does not go through fork_idle(), allocate the
memory for it separately.

Change-Id: I4696a414ffe07d4114b56d326463026019e278f1
Signed-off-by: Pavankumar Kondeti
[schikk@codeaurora.org: resolved merge conflicts]
Signed-off-by: Swetha Chikkaboraiah
---
 include/linux/sched.h |  2 +-
 kernel/fork.c         |  2 +-
 kernel/sched/core.c   | 11 ++++-------
 kernel/sched/hmp.c    |  5 +----
 kernel/sched/sched.h  |  4 ++--
 kernel/smpboot.c      |  2 +-
 6 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25ed9c7c3c21..5e0f2ce81100 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -384,7 +384,7 @@ extern int lockdep_tasklist_lock_is_held(void);
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
+extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern cpumask_var_t cpu_isolated_map;
diff --git a/kernel/fork.c b/kernel/fork.c
index f1a53fbe6f46..14ac1181cd95 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1753,7 +1753,7 @@ struct task_struct *fork_idle(int cpu)
 			    cpu_to_node(cpu));
 	if (!IS_ERR(task)) {
 		init_idle_pids(task->pids);
-		init_idle(task, cpu, false);
+		init_idle(task, cpu);
 	}
 
 	return task;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d507932f3908..92bce29c4819 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2479,7 +2479,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	unsigned long flags;
 	int cpu;
 
-	init_new_task_load(p, false);
+	init_new_task_load(p);
 
 	cpu = get_cpu();
 	__sched_fork(clone_flags, p);
@@ -5439,19 +5439,15 @@ void init_idle_bootup_task(struct task_struct *idle)
  * init_idle - set up an idle thread for a given CPU
  * @idle: task in question
  * @cpu: cpu the idle task belongs to
- * @cpu_up: differentiate between initial boot vs hotplug
  *
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
+void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	if (!cpu_up)
-		init_new_task_load(idle, true);
-
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
@@ -8579,7 +8575,8 @@ void __init sched_init(void)
 	 * but because we are the idle thread, we just pick up running again
 	 * when this runqueue becomes "idle".
 	 */
-	init_idle(current, smp_processor_id(), false);
+	init_idle(current, smp_processor_id());
+	init_new_task_load(current);
 
 	calc_load_update = jiffies + LOAD_FREQ;
 
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 1c5428c74f96..f9526751936e 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1544,7 +1544,7 @@ void free_task_load_ptrs(struct task_struct *p)
 	p->ravg.prev_window_cpu = NULL;
 }
 
-void init_new_task_load(struct task_struct *p, bool idle_task)
+void init_new_task_load(struct task_struct *p)
 {
 	int i;
 	u32 init_load_windows = sched_init_task_load_windows;
@@ -1571,9 +1571,6 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
 	/* Don't have much choice. CPU frequency would be bogus */
 	BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
 
-	if (idle_task)
-		return;
-
 	if (init_load_pct)
 		init_load_windows = div64_u64((u64)init_load_pct *
 			  (u64)sched_ravg_window, 100);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b6cd12998f16..35985ddf12a6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1151,7 +1151,7 @@ extern unsigned int __read_mostly sched_downmigrate;
 extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
 extern unsigned int __read_mostly sched_load_granule;
 
-extern void init_new_task_load(struct task_struct *p, bool idle_task);
+extern void init_new_task_load(struct task_struct *p);
 extern u64 sched_ktime_clock(void);
 extern int got_boost_kick(void);
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -1641,7 +1641,7 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
 	return NULL;
 }
 
-static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+static inline void init_new_task_load(struct task_struct *p)
 {
 }
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 552e154fc77e..979248f5e21a 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
 	if (!tsk)
 		return ERR_PTR(-ENOMEM);
 
-	init_idle(tsk, cpu, true);
+	init_idle(tsk, cpu);
 
 	return tsk;
 }
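
Editor's note: the sketch below illustrates the leak pattern the commit message
describes. It is a minimal userspace model, not kernel code: the names ravg,
curr_window_cpu, prev_window_cpu and init_new_task_load() mirror the patch,
while NR_CPUS, calloc() and free() stand in for the kernel's per-CPU sizing and
kcalloc()/kfree(). Calling the allocator a second time on the same task, as the
removed init_idle() path did for every non-boot idle thread, overwrites the
pointers from the first call and leaks that memory.

/* leak-sketch.c: userspace illustration only, assumptions noted above. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct ravg {
	unsigned int *curr_window_cpu;
	unsigned int *prev_window_cpu;
};

struct task {
	struct ravg ravg;
};

/* Mirrors init_new_task_load(): unconditionally allocates both arrays. */
static void init_new_task_load(struct task *p)
{
	p->ravg.curr_window_cpu = calloc(NR_CPUS, sizeof(unsigned int));
	p->ravg.prev_window_cpu = calloc(NR_CPUS, sizeof(unsigned int));
}

int main(void)
{
	struct task idle = { 0 };

	/* Path 1: fork_idle() -> copy_process() -> sched_fork() */
	init_new_task_load(&idle);
	unsigned int *first = idle.ravg.curr_window_cpu;

	/* Path 2 (removed by the patch): fork_idle() -> init_idle() */
	init_new_task_load(&idle);

	/* The arrays from the first call are now unreachable: a leak. */
	printf("first allocation %p leaked, task now points at %p\n",
	       (void *)first, (void *)idle.ravg.curr_window_cpu);

	free(idle.ravg.curr_window_cpu);
	free(idle.ravg.prev_window_cpu);
	return 0;
}

The patch therefore keeps a single allocation site per task (sched_fork() via
path 1), drops the call from init_idle(), and adds one explicit
init_new_task_load(current) in sched_init() for the boot CPU's idle task, which
never passes through fork_idle().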