Perf: arm64: disable irq for hotplug offline
Currently the hotplug notifier calls cpu_pmu_enable_percpu_irq when a
CPU comes online, but there is no corresponding call to
cpu_pmu_disable_percpu_irq when the CPU goes offline. Change the code
to follow what was done in 3.18, including tracking whether the PMU is
active and the IRQ number in use.

Change-Id: I1835fb543eb483713a30eb6c0ccd4e4a4b9908be
Signed-off-by: Patrick Fay <pfay@codeaurora.org>
parent 406ab7d2d4
commit c079e157fd

2 changed files with 55 additions and 37 deletions
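For context before the diff: the per-CPU IRQ helpers the notifier calls act only on the CPU they run on, which is why offline needs its own disarm step. A minimal sketch of those helpers, assuming the 4.4-era arm_pmu driver (they are referenced but not shown in this diff):

#include <linux/interrupt.h>
#include <linux/irq.h>

/*
 * Sketch, assumed from the 4.4-era driver (not part of this diff).
 * enable_percpu_irq()/disable_percpu_irq() affect only the calling
 * CPU's copy of a per-CPU interrupt.
 */
static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

Because each CPU only arms or disarms its own copy of the interrupt, enabling on online without a matching disable on offline leaves the dying CPU's IRQ armed — the bug this patch fixes.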
@@ -367,6 +367,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -601,10 +603,12 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
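Note the guard tightening from irq >= 0 to irq > 0 here and in the hunks below: platform_get_irq() returns a negative errno on failure, and IRQ number 0 is not usable by drivers, so only strictly positive values are treated as valid. A small illustration under those assumptions (get_pmu_irq_checked and pdev are hypothetical, for this sketch only):

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Illustration only: "pdev" is a hypothetical platform device. */
static int get_pmu_irq_checked(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* negative errno on failure */

	/* IRQ 0 is not a valid driver IRQ number, so reject it as well. */
	if (irq <= 0)
		return irq < 0 ? irq : -ENXIO;

	return irq;
}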
@@ -617,10 +621,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
+			if (irq > 0)
 				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -639,7 +644,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	}
 
 	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
+	if (irq > 0 && irq_is_percpu(irq)) {
 		err = request_percpu_irq(irq, handler, "arm-pmu",
 					 &hw_events->percpu_pmu);
 		if (err) {
@@ -648,6 +653,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 			return err;
 		}
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+		cpu_pmu->percpu_irq = irq;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -754,13 +760,6 @@ static void cpu_pm_pmu_common(void *info)
 		return;
 	}
 
-	/*
-	 * Always reset the PMU registers on power-up even if
-	 * there are no events running.
-	 */
-	if (cmd == CPU_PM_EXIT && armpmu->reset)
-		armpmu->reset(armpmu);
-
 	if (!enabled) {
 		data->ret = NOTIFY_OK;
 		return;
@@ -795,6 +794,13 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
 		.cpu	= smp_processor_id(),
 	};
 
+	/*
+	 * Always reset the PMU registers on power-up even if
+	 * there are no events running.
+	 */
+	if (cmd == CPU_PM_EXIT && data.armpmu->reset)
+		data.armpmu->reset(data.armpmu);
+
 	cpu_pm_pmu_common(&data);
 	return data.ret;
 }
@@ -824,6 +830,7 @@ static inline void cpu_pm_pmu_common(void *info) { }
 static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 			  void *hcpu)
 {
+	int irq = -1;
 	unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
 	struct cpu_pm_pmu_args data = {
 		.armpmu	= container_of(b, struct arm_pmu, hotplug_nb),
@@ -835,37 +842,37 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 
 	switch (masked_action) {
 	case CPU_STARTING:
-		data.cmd = CPU_PM_EXIT;
-		break;
-	case CPU_DYING:
-		data.cmd = CPU_PM_ENTER;
-		break;
 	case CPU_DOWN_FAILED:
-		data.cmd = CPU_PM_ENTER_FAILED;
-		break;
-	case CPU_ONLINE:
-		if (data.armpmu->plat_device) {
-			struct platform_device *pmu_device =
-				data.armpmu->plat_device;
-			int irq = platform_get_irq(pmu_device, 0);
-
-			if (irq >= 0 && irq_is_percpu(irq)) {
-				smp_call_function_single(data.cpu,
-					cpu_pmu_enable_percpu_irq, &irq, 1);
-			}
+		/*
+		 * Always reset the PMU registers on power-up even if
+		 * there are no events running.
+		 */
+		if (data.armpmu->reset)
+			data.armpmu->reset(data.armpmu);
+		if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+			if (data.armpmu->plat_device)
+				irq = data.armpmu->percpu_irq;
+			/* Arm the PMU IRQ before appearing. */
+			if (irq > 0 && irq_is_percpu(irq))
+				cpu_pmu_enable_percpu_irq(&irq);
+			data.cmd = CPU_PM_EXIT;
+			cpu_pm_pmu_common(&data);
 		}
-		return NOTIFY_DONE;
+		return NOTIFY_OK;
+	case CPU_DYING:
+		if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
+			data.cmd = CPU_PM_ENTER;
+			cpu_pm_pmu_common(&data);
+			/* Disarm the PMU IRQ before disappearing. */
+			if (data.armpmu->plat_device)
+				irq = data.armpmu->percpu_irq;
+			if (irq > 0 && irq_is_percpu(irq))
+				cpu_pmu_disable_percpu_irq(&irq);
+		}
+		return NOTIFY_OK;
 	default:
 		return NOTIFY_DONE;
 	}
-
-	if (smp_processor_id() == data.cpu)
-		cpu_pm_pmu_common(&data);
-	else
-		smp_call_function_single(data.cpu,
-					 cpu_pm_pmu_common, &data, 1);
-
-	return data.ret;
 }
 
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
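For orientation: cpu_pmu_notify() above is registered as a CPU hotplug notifier, and CPU_STARTING/CPU_DYING are delivered on the CPU that is coming up or going down (with interrupts disabled). That is why the new code can call cpu_pmu_enable_percpu_irq()/cpu_pmu_disable_percpu_irq() directly on the notified CPU, where the old CPU_ONLINE path needed smp_call_function_single(). A sketch of the registration, assuming the 4.4-era driver (not part of this diff; the real cpu_pmu_init() also sets up per-CPU hw_events):

#include <linux/cpu.h>

/* Sketch under the assumptions above; see the kernel source for the real body. */
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		return err;

	/* ... per-CPU hw_events setup elided ... */
	return 0;
}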
@@ -966,7 +973,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
 		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
 		irq = platform_get_irq(pdev, i);
-		if (irq >= 0) {
+		if (irq > 0) {
 			bool spi = !irq_is_percpu(irq);
 
 			if (i > 0 && spi != using_spi) {
@@ -1085,6 +1092,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 	if (ret)
 		goto out_destroy;
 
+	pmu->pmu_state = ARM_PMU_STATE_OFF;
+	pmu->percpu_irq = -1;
+
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 		pmu->name, pmu->num_events);
 
The second changed file is the header that declares struct arm_pmu:

@@ -77,6 +77,12 @@ struct pmu_hw_events {
 	struct arm_pmu		*percpu_pmu;
 };
 
+enum armpmu_pmu_states {
+	ARM_PMU_STATE_OFF,
+	ARM_PMU_STATE_RUNNING,
+	ARM_PMU_STATE_GOING_DOWN,
+};
+
 struct arm_pmu {
 	struct pmu		pmu;
 	cpumask_t		active_irqs;
@@ -101,6 +107,8 @@ struct arm_pmu {
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
+	int		percpu_irq;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;