diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355dae..8166aaabe5fc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -462,6 +462,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
@@ -469,6 +470,21 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 		__smp_cross_call = fn;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -483,6 +499,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
+	for_each_cpu(cpu, target)
+		per_cpu(pending_ipi, cpu) = true;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }
@@ -513,21 +534,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -660,11 +666,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+	per_cpu(pending_ipi, cpu) = false;
+
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
+	BUG_ON(cpu_is_offline(cpu));
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 91d673a05d03..60946e1c30cf 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -469,6 +469,8 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
 #else
 #define acpi_table_parse_madt(...)	do { } while (0)
 #endif
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -615,8 +617,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
 	__smp_cross_call = fn;
@@ -633,6 +633,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
+	for_each_cpu(cpu, target)
+		per_cpu(pending_ipi, cpu) = true;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }
@@ -760,6 +765,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+	per_cpu(pending_ipi, cpu) = false;
 	set_irq_regs(old_regs);
 }
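
Note (not part of the patch): the hunks above only set pending_ipi in smp_cross_call() and clear it at the end of handle_IPI(); no reader of the flag appears in this diff. A minimal sketch of how a consumer (for example a hotplug or low-power path in another file) might query it follows. The helper name arm_cpu_has_pending_ipi() is an illustrative assumption, not an interface added by the patch; it relies only on the DEFINE_PER_CPU(bool, pending_ipi) introduced above having external linkage.

/*
 * Illustrative sketch only -- not part of the patch above.  Shows how a
 * separate file could test the per-CPU pending_ipi flag defined in
 * arch/arm{,64}/kernel/smp.c.  The helper name is hypothetical.
 */
#include <linux/percpu.h>

DECLARE_PER_CPU(bool, pending_ipi);

/* True if an IPI has been raised for @cpu and not yet handled there. */
static inline bool arm_cpu_has_pending_ipi(unsigned int cpu)
{
	return per_cpu(pending_ipi, cpu);
}

Because the flag is a plain bool written without barriers (set before __smp_cross_call() fires the IPI, cleared after trace_ipi_exit_rcuidle() in handle_IPI()), such a check can only serve as a hint that an IPI may still be in flight, not as a synchronization point.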