Perf: arm64: fix disable of pmu irq during hotplug
PMU irq is disabled when a cpu is hotplugged off and perf is running.
Using cpu_pmu->active_events to determine if the pmu is running left a
window where it is decremented to 0 in hw_perf_event_destroy, and then
armpmu_release_hardware is called. If a cpu is hotplugged off in this
window it may not disable its irq.

Use a separate flag which is not cleared until after the irq is
released by all online cpus. The variable needs to be tristate because
of the possibility of a cpu being hotplugged in during this window. In
that case it should not enable its irq when the pmu is being shut down.
Having the GOING_DOWN state allows correct behavior for cpus both
going down and coming up.

Change-Id: I934ba5dec34e681ce8defd7fa7e311b4a2a92c1a
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[satyap: merge conflict resolution and move changes in
 arch/arm64/kernel/perf_event.c to drivers/perf/arm_pmu.c to align
 with kernel 4.4]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
parent 0e4ee435ac
commit 09e03e5113

3 changed files with 21 additions and 4 deletions
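The diff below implements a small state machine around the new
pmu_state field. As a reading aid, here is a minimal standalone sketch
of that logic (plain userspace C, not kernel code; the function names
are hypothetical stand-ins for armpmu_reserve_hardware(),
cpu_pmu_free_irq(), and the hotplug notifier paths in the diff):

/*
 * Minimal standalone sketch (not kernel code) of the tristate PMU
 * state machine this patch introduces. The enum values mirror the
 * diff; the functions are stand-ins for the routines it touches.
 */
#include <stdio.h>

enum arm_pmu_state {
	ARM_PMU_STATE_OFF = 0,
	ARM_PMU_STATE_GOING_DOWN,
	ARM_PMU_STATE_RUNNING,
};

static enum arm_pmu_state pmu_state = ARM_PMU_STATE_OFF;

/* Stand-in for armpmu_reserve_hardware(): irqs requested, PMU live. */
static void reserve_hardware(void)
{
	pmu_state = ARM_PMU_STATE_RUNNING;
}

/* Stand-in for cpu_pmu_free_irq(): enter GOING_DOWN before releasing
 * the per-cpu irqs, so a cpu hotplugged in mid-teardown will not
 * re-enable its irq; only then drop to OFF. */
static void free_irq_all(void)
{
	pmu_state = ARM_PMU_STATE_GOING_DOWN;
	/* ... release the irq on each online cpu here ... */
	pmu_state = ARM_PMU_STATE_OFF;
}

/* CPU_DOWN_PREPARE: disable the irq unless the PMU is fully off. */
static int cpu_going_down_should_disable_irq(void)
{
	return pmu_state != ARM_PMU_STATE_OFF;
}

/* CPU coming online: arm the irq only if the PMU is fully running,
 * never while it is being torn down. */
static int cpu_coming_up_should_enable_irq(void)
{
	return pmu_state == ARM_PMU_STATE_RUNNING;
}

int main(void)
{
	reserve_hardware();
	printf("cpu up while RUNNING enables irq: %d\n",
	       cpu_coming_up_should_enable_irq());   /* prints 1 */

	pmu_state = ARM_PMU_STATE_GOING_DOWN;        /* mid-teardown */
	printf("cpu up while GOING_DOWN enables irq: %d\n",
	       cpu_coming_up_should_enable_irq());   /* prints 0 */
	printf("cpu down while GOING_DOWN disables irq: %d\n",
	       cpu_going_down_should_disable_irq()); /* prints 1 */

	free_irq_all();
	return 0;
}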
@@ -30,6 +30,7 @@ static char *descriptions =
 " 7 Perf: arm64: Update PMU force reset\n"
 "10 Perf: arm64: tracectr: initialize counts after hotplug\n"
 "11 Perf: arm64: Refine disable/enable in tracecounters\n"
+"12 Perf: arm64: fix disable of pmu irq during hotplug\n"
 "15 Perf: arm64: make debug dir handle exportable\n"
 "16 Perf: arm64: add perf trace user\n"
 "17 Perf: arm64: add support for kryo pmu\n"
@@ -370,6 +370,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -630,6 +632,14 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (!irqs)
+		return;
+
+	/*
+	 * If a cpu comes online during this function, do not enable its irq.
+	 * If a cpu goes offline, it should disable its irq.
+	 */
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
@@ -649,6 +659,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 			free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -766,7 +777,6 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	struct arm_pmu *cpu_pmu = container_of(b, struct arm_pmu, hotplug_nb);
 	int irq;
 	struct pmu *pmu;
-	int perf_running;
 	unsigned long masked_action = action & ~CPU_TASKS_FROZEN;
 	int ret = NOTIFY_DONE;
 
@@ -783,13 +793,12 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 	if (!cpumask_test_cpu(cpu, &cpu_pmu->supported_cpus))
 		return NOTIFY_DONE;
 
-	perf_running = atomic_read(&cpu_pmu->active_events);
 	switch (masked_action) {
 	case CPU_DOWN_PREPARE:
 		if (cpu_pmu->save_pm_registers)
 			smp_call_function_single(cpu,
 				cpu_pmu->save_pm_registers, hcpu, 1);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state != ARM_PMU_STATE_OFF) {
 			if (cpu_has_active_perf(cpu, cpu_pmu))
 				smp_call_function_single(cpu,
 					armpmu_update_counters, cpu_pmu, 1);
@@ -808,7 +817,7 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 			cpu_pmu->reset(NULL);
 		if (cpu_pmu->restore_pm_registers)
 			cpu_pmu->restore_pm_registers(hcpu);
-		if (perf_running) {
+		if (cpu_pmu->pmu_state == ARM_PMU_STATE_RUNNING) {
 			/* Arm the PMU IRQ before appearing. */
 			if (cpu_pmu->plat_device) {
 				irq = cpu_pmu->percpu_irq;
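Note the asymmetry between the two notifier checks above: a cpu going
down disables its irq whenever pmu_state is not ARM_PMU_STATE_OFF
(i.e. RUNNING or GOING_DOWN), while a cpu coming up arms its irq only
when pmu_state is exactly ARM_PMU_STATE_RUNNING. This is why a simple
boolean would not suffice: during the GOING_DOWN window, offlining
cpus must still release their irq, but onlining cpus must not take an
irq that cpu_pmu_free_irq() would then never free.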
@@ -51,6 +51,12 @@ struct arm_pmu_platdata {
 	}, \
 }
 
+enum arm_pmu_state {
+	ARM_PMU_STATE_OFF = 0,
+	ARM_PMU_STATE_GOING_DOWN,
+	ARM_PMU_STATE_RUNNING,
+};
+
 /* The events for a given PMU register set. */
 struct pmu_hw_events {
 	/*
@@ -103,6 +109,7 @@ struct arm_pmu {
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
 	int		percpu_irq;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;