ARM64: smp: Prevent cluster LPM modes when IPIs are pending on cluster CPUs

Low power modes (LPM) can fail if there is a pending IPI at the GIC CPU
interface. In some use cases, frequent LPM failures cause power and
performance degradation. Hence, prevent cluster low power modes when
there is a pending IPI on any of the cluster's CPUs.

Change-Id: Id8a0ac24e4867ef824e0a6f11d989f1e1a2b0e93
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>

Conflicts:
	arch/arm/kernel/smp.c
	arch/arm64/kernel/smp.c
Author:    Mahesh Sivasubramanian, 2014-09-18 20:33:55 -06:00
Committer: David Keitel
Parent:    75765be3fe
Commit:    76fb22e96d
2 changed files with 34 additions and 17 deletions
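
For context, here is a minimal sketch of how a cluster LPM / cpuidle driver could consult the per-CPU pending_ipi flag introduced by this patch before committing to a cluster low power mode. This is an illustration, not code from this commit; cluster_has_pending_ipi(), cluster_lpm_allowed() and the cluster_cpus mask are hypothetical names.

/*
 * Hypothetical sketch: an LPM driver polls the per-CPU pending_ipi flag
 * exported by smp.c and refuses a cluster low power mode while any CPU
 * in the cluster still has an IPI in flight at the GIC CPU interface.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(bool, pending_ipi);

static bool cluster_has_pending_ipi(const struct cpumask *cluster_cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cluster_cpus)
		if (per_cpu(pending_ipi, cpu))
			return true;

	return false;
}

static bool cluster_lpm_allowed(const struct cpumask *cluster_cpus)
{
	/* Skip the cluster low power mode if any member CPU has a pending IPI. */
	return !cluster_has_pending_ipi(cluster_cpus);
}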

arch/arm/kernel/smp.c

@@ -462,6 +462,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
@@ -469,6 +470,21 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
__smp_cross_call = fn;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_WAKEUP);
}
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -483,6 +499,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
unsigned int cpu;
for_each_cpu(cpu, target)
per_cpu(pending_ipi, cpu) = true;
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -513,21 +534,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_WAKEUP);
}
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
@@ -660,11 +666,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
per_cpu(pending_ipi, cpu) = false;
set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
BUG_ON(cpu_is_offline(cpu));
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

arch/arm64/kernel/smp.c

@@ -469,6 +469,8 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
#else
#define acpi_table_parse_madt(...) do { } while (0)
#endif
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);
/*
* Enumerate the possible CPU set from the device tree and build the
@@ -615,8 +617,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
__smp_cross_call = fn;
@@ -633,6 +633,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
unsigned int cpu;
for_each_cpu(cpu, target)
per_cpu(pending_ipi, cpu) = true;
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -760,6 +765,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
per_cpu(pending_ipi, cpu) = false;
set_irq_regs(old_regs);
}