Perf: arm64: disable irq for hotplug offline

Currently the hotplug notifier calls cpu_pmu_enable_percpu_irq when a CPU
comes online, but it has no corresponding call to
cpu_pmu_disable_percpu_irq when the CPU goes offline. Change the code to
follow what was done in kernel 3.18, including tracking whether the PMU is
active and which IRQ number is in use.

Change-Id: I1835fb543eb483713a30eb6c0ccd4e4a4b9908be
Signed-off-by: Patrick Fay <pfay@codeaurora.org>
Patrick Fay 2017-04-14 10:14:02 -07:00
parent 406ab7d2d4
commit c079e157fd
2 changed files with 55 additions and 37 deletions
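
For orientation, the following is a simplified sketch of the notifier flow the
diff below implements; the function name is illustrative, and the reset call,
error handling, and the CPU_PM plumbing through cpu_pm_pmu_common() are
omitted.

/* Simplified sketch only -- not lines from the patch. */
static int cpu_pmu_hotplug_sketch(struct notifier_block *b,
				  unsigned long action, void *hcpu)
{
	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
	int irq = pmu->percpu_irq;	/* cached when the IRQ was requested */

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* Arm the per-CPU IRQ only if the PMU is actually in use. */
		if (pmu->pmu_state == ARM_PMU_STATE_RUNNING &&
		    irq > 0 && irq_is_percpu(irq))
			cpu_pmu_enable_percpu_irq(&irq);
		return NOTIFY_OK;
	case CPU_DYING:
		/* Disarm the per-CPU IRQ before the CPU goes away. */
		if (pmu->pmu_state != ARM_PMU_STATE_OFF &&
		    irq > 0 && irq_is_percpu(irq))
			cpu_pmu_disable_percpu_irq(&irq);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}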


@@ -367,6 +367,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
return 0;
}
@@ -601,10 +603,12 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
if (irq > 0 && irq_is_percpu(irq)) {
on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
@@ -617,10 +621,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
if (irq > 0)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -639,7 +644,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
}
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
if (irq > 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
if (err) {
@@ -648,6 +653,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
cpu_pmu->percpu_irq = irq;
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -754,13 +760,6 @@ static void cpu_pm_pmu_common(void *info)
return;
}
/*
* Always reset the PMU registers on power-up even if
* there are no events running.
*/
if (cmd == CPU_PM_EXIT && armpmu->reset)
armpmu->reset(armpmu);
if (!enabled) {
data->ret = NOTIFY_OK;
return;
@@ -795,6 +794,13 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
.cpu = smp_processor_id(),
};
/*
* Always reset the PMU registers on power-up even if
* there are no events running.
*/
if (cmd == CPU_PM_EXIT && data.armpmu->reset)
data.armpmu->reset(data.armpmu);
cpu_pm_pmu_common(&data);
return data.ret;
}
@@ -824,6 +830,7 @@ static inline void cpu_pm_pmu_common(void *info) { }
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
void *hcpu)
{
int irq = -1;
unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
struct cpu_pm_pmu_args data = {
.armpmu = container_of(b, struct arm_pmu, hotplug_nb),
@@ -835,37 +842,37 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
switch (masked_action) {
case CPU_STARTING:
data.cmd = CPU_PM_EXIT;
break;
case CPU_DYING:
data.cmd = CPU_PM_ENTER;
break;
case CPU_DOWN_FAILED:
data.cmd = CPU_PM_ENTER_FAILED;
break;
case CPU_ONLINE:
if (data.armpmu->plat_device) {
struct platform_device *pmu_device =
data.armpmu->plat_device;
int irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
smp_call_function_single(data.cpu,
cpu_pmu_enable_percpu_irq, &irq, 1);
/*
* Always reset the PMU registers on power-up even if
* there are no events running.
*/
if (data.armpmu->reset)
data.armpmu->reset(data.armpmu);
if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
if (data.armpmu->plat_device)
irq = data.armpmu->percpu_irq;
/* Arm the PMU IRQ before appearing. */
if (irq > 0 && irq_is_percpu(irq))
cpu_pmu_enable_percpu_irq(&irq);
data.cmd = CPU_PM_EXIT;
cpu_pm_pmu_common(&data);
}
return NOTIFY_OK;
case CPU_DYING:
if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
data.cmd = CPU_PM_ENTER;
cpu_pm_pmu_common(&data);
/* Disarm the PMU IRQ before disappearing. */
if (data.armpmu->plat_device)
irq = data.armpmu->percpu_irq;
if (irq > 0 && irq_is_percpu(irq))
cpu_pmu_disable_percpu_irq(&irq);
}
return NOTIFY_DONE;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
if (smp_processor_id() == data.cpu)
cpu_pm_pmu_common(&data);
else
smp_call_function_single(data.cpu,
cpu_pm_pmu_common, &data, 1);
return data.ret;
}
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
@@ -966,7 +973,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
irq = platform_get_irq(pdev, i);
if (irq >= 0) {
if (irq > 0) {
bool spi = !irq_is_percpu(irq);
if (i > 0 && spi != using_spi) {
@@ -1085,6 +1092,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (ret)
goto out_destroy;
pmu->pmu_state = ARM_PMU_STATE_OFF;
pmu->percpu_irq = -1;
pr_info("enabled with %s PMU driver, %d counters available\n",
pmu->name, pmu->num_events);


@@ -77,6 +77,12 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
};
enum armpmu_pmu_states {
ARM_PMU_STATE_OFF,
ARM_PMU_STATE_RUNNING,
ARM_PMU_STATE_GOING_DOWN,
};
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -101,6 +107,8 @@ struct arm_pmu {
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
int pmu_state;
int percpu_irq;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
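
As a reader's note (not part of the patch), the lifecycle of the new pmu_state
field, as driven by the hunks above, can be summarized by annotating the enum:

enum armpmu_pmu_states {
	ARM_PMU_STATE_OFF,		/* set in arm_pmu_device_probe() and at the end of cpu_pmu_free_irq() */
	ARM_PMU_STATE_RUNNING,		/* set in armpmu_reserve_hardware() */
	ARM_PMU_STATE_GOING_DOWN,	/* set on entry to cpu_pmu_free_irq() */
};

With these checks, the hotplug notifier only arms or disarms the per-CPU IRQ
while the PMU is in use, so CPUs coming and going while perf is idle are left
alone.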