ARM64: smp: Prevent cluster LPM modes when IPIs are pending on cluster CPUs

LPM modes can fail if an IPI is pending at the GIC CPU
interface. In some use cases, frequent LPM mode failures can
cause power and performance degradation. Hence, prevent cluster
low-power modes when there is a pending IPI on any of the
cluster's CPUs.

Change-Id: Id8a0ac24e4867ef824e0a6f11d989f1e1a2b0e93
Signed-off-by: Mahesh Sivasubramanian <msivasub@codeaurora.org>
Signed-off-by: Murali Nalajala <mnalajal@codeaurora.org>
[satyap: trivial merge conflict resolution]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
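
For context, the per-CPU flag added below is only half of the story:
a cluster LPM driver has to consult it before committing the cluster
to a low-power mode. The following is a minimal sketch of that
consumer side; cluster_has_pending_ipi() and the cluster_cpus mask
are hypothetical and not part of this patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/*
 * Hypothetical consumer in a cluster LPM driver: refuse cluster
 * low-power modes while any CPU in the cluster has an IPI in
 * flight, i.e. one sent via smp_cross_call_common() that has not
 * been handled yet.
 */
DECLARE_PER_CPU(bool, pending_ipi);

static bool cluster_has_pending_ipi(const struct cpumask *cluster_cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cluster_cpus)
		if (per_cpu(pending_ipi, cpu))
			return true;

	return false;
}

The LPM driver would then abort, or pick a shallower mode, whenever
cluster_has_pending_ipi() returns true for the cluster about to idle.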


@@ -464,6 +464,17 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
+DEFINE_PER_CPU(bool, pending_ipi);
+
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	__smp_cross_call(cpumask, func);
+}
+
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
 	if (!__smp_cross_call)
@@ -472,17 +483,17 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_CALL_FUNC);
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_WAKEUP);
+	smp_cross_call_common(mask, IPI_WAKEUP);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
@@ -675,7 +686,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 void smp_send_reschedule(int cpu)
 {
 	BUG_ON(cpu_is_offline(cpu));
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -686,7 +697,7 @@ void smp_send_stop(void)
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
 	if (!cpumask_empty(&mask))
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -768,7 +779,7 @@ static void raise_nmi(cpumask_t *mask)
 	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
 		nmi_cpu_backtrace(NULL);
 
-	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
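
Note that none of the hunks show where pending_ipi is cleared; for
the flag to stay accurate, it presumably has to be reset once
handle_IPI() has serviced the interrupt (the handle_IPI() hunk
headers above and below suggest changes in that area). A sketch of
what that clearing might look like at the tail of handle_IPI(),
purely as an assumption about code not visible in this diff:

/*
 * Assumed counterpart (not visible in this diff): once the IPI
 * has been serviced on this CPU, mark it no longer pending so
 * LPM decisions stop seeing it.
 */
per_cpu(pending_ipi, smp_processor_id()) = false;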


@@ -484,6 +484,16 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
 void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
+DEFINE_PER_CPU(bool, pending_ipi);
+
+void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	__smp_cross_call(cpumask, func);
+}
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
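
Unlike the 32-bit arm version above, this arm64 copy of
smp_cross_call_common() is not static, which suggests it is called
from outside this file. If so, callers would presumably reach it
through declarations along these lines (hypothetical; no header
change appears in this diff):

extern void smp_cross_call_common(const struct cpumask *cpumask,
				  unsigned int func);
DECLARE_PER_CPU(bool, pending_ipi);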
@@ -683,17 +693,17 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_CALL_FUNC);
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_WAKEUP);
+	smp_cross_call_common(mask, IPI_WAKEUP);
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -759,7 +769,7 @@ static void smp_send_all_cpu_backtrace(void)
 	pr_info("\nsending IPI to all other CPUs:\n");
 
 	if (!cpumask_empty(&backtrace_mask))
-		smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+		smp_cross_call_common(&backtrace_mask, IPI_CPU_BACKTRACE);
 
 	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
@@ -867,13 +877,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 void smp_send_reschedule(int cpu)
 {
 	BUG_ON(cpu_is_offline(cpu));
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_TIMER);
+	smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
@@ -887,7 +897,7 @@ void smp_send_stop(void)
 		cpumask_copy(&mask, cpu_online_mask);
 		cpumask_clear_cpu(smp_processor_id(), &mask);
 
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 	}
 
 	/* Wait up to one second for other CPUs to stop */