diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f501b8c0de4e..efce166a9f17 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -231,7 +231,7 @@ struct pmu {
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f984b4fcff..3e6e5eb3e4d2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9654,13 +9654,26 @@ static void __perf_event_stop_swclock(void *__info)
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+						   flags);
+		}
+
 		mutex_lock(&ctx->mutex);
 		/*
 		 * If keeping events across hotplugging is supported, do not
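
Aside on the perf_event.h hunk: under sparse, __percpu is an address-space
attribute, so its position relative to the '*' changes which object is
annotated. A minimal sketch of the distinction (hypothetical declarations,
not from this patch; __percpu is defined empty so the snippet compiles
outside the kernel, where sparse would supply the real attribute):

	/* Sketch: __percpu placement relative to '*'. */
	#define __percpu	/* empty here; sparse maps it to an address space */

	struct perf_cpu_context;

	/*
	 * A normal pointer to per-CPU data: the shape per_cpu_ptr()
	 * expects, and what the hunk changes pmu_cpu_context to.
	 */
	struct perf_cpu_context __percpu *per_cpu_data;

	/*
	 * A per-CPU pointer to shared data: a different type under
	 * sparse, which is what the old declaration expressed.
	 */
	struct perf_cpu_context * __percpu shared_data;

The core.c hunk follows its own in-line comment: for hardware contexts it
cancels the per-CPU mux hrtimer and clears hrtimer_active under
hrtimer_lock (the same lock mainline's perf_mux_hrtimer_restart() takes
before re-arming), so the timer is not left pending to be migrated off the
CPU going offline.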