diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f60117eb60fa..f352d06d7673 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5600,7 +5600,6 @@ int do_isolation_work_cpu_stop(void *data)
 	 */
 	nohz_balance_clear_nohz_mask(cpu);
 
-	clear_hmp_request(cpu);
 	local_irq_enable();
 	return 0;
 }
@@ -5725,6 +5724,7 @@ int sched_isolate_cpu(int cpu)
 	migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
 
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+	clear_hmp_request(cpu);
 
 	calc_load_migrate(rq);
 	update_max_interval();
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 968a41e0e81e..a9ccb63c8e23 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -641,14 +641,18 @@ void clear_hmp_request(int cpu)
 	clear_boost_kick(cpu);
 	clear_reserved(cpu);
 	if (rq->push_task) {
+		struct task_struct *push_task = NULL;
+
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		if (rq->push_task) {
 			clear_reserved(rq->push_cpu);
-			put_task_struct(rq->push_task);
+			push_task = rq->push_task;
 			rq->push_task = NULL;
 		}
 		rq->active_balance = 0;
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		if (push_task)
+			put_task_struct(push_task);
 	}
 }
 
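Note on the pattern above: the hmp.c hunk detaches the task reference from rq->push_task while rq->lock is held, but issues the final put_task_struct() only after the lock (taken with interrupts disabled) has been released; the core.c hunks likewise move the clear_hmp_request(cpu) call out of the stop-machine callback do_isolation_work_cpu_stop() and into sched_isolate_cpu(), after stop_cpus() has returned. Below is a minimal userspace sketch of the detach-under-lock / put-outside-lock idea only, under the assumption of a hypothetical refcounted object; struct obj, obj_put(), rq_lock and clear_request() are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the pattern used in clear_hmp_request(): detach the refcounted
 * object while holding the lock, drop the final reference only after the
 * lock is released, so teardown work never runs under the lock.
 * All names here are illustrative, not kernel interfaces.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	int id;
};

static void obj_put(struct obj *o)
{
	/* Dropping the last reference frees the object; in the kernel
	 * analogue this is what must not happen under rq->lock. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("freeing obj %d outside the lock\n", o->id);
		free(o);
	}
}

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *push_task;	/* protected by rq_lock */

static void clear_request(void)
{
	struct obj *to_put = NULL;

	pthread_mutex_lock(&rq_lock);
	if (push_task) {
		/* Detach under the lock, but defer the put. */
		to_put = push_task;
		push_task = NULL;
	}
	pthread_mutex_unlock(&rq_lock);

	/* Final reference dropped with no locks held. */
	if (to_put)
		obj_put(to_put);
}

int main(void)
{
	push_task = calloc(1, sizeof(*push_task));
	atomic_init(&push_task->refcount, 1);
	push_task->id = 42;

	clear_request();
	return 0;
}

The reason for the deferral is that dropping the last reference can free the object and trigger further teardown, which is work that should not be done while holding a raw spinlock with interrupts disabled, as rq->lock is held in the original code.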