* refs/heads/tmp-5e24b4e:
  Linux 4.4.153
  ovl: warn instead of error if d_type is not supported
  ovl: Do d_type check only if work dir creation was successful
  ovl: Ensure upper filesystem supports d_type
  x86/mm: Fix use-after-free of ldt_struct
  x86/mm/pat: Fix L1TF stable backport for CPA
  ANDROID: x86_64_cuttlefish_defconfig: Enable lz4 compression for zram
  UPSTREAM: drivers/block/zram/zram_drv.c: fix bug storing backing_dev
  BACKPORT: zram: introduce zram memory tracking
  BACKPORT: zram: record accessed second
  BACKPORT: zram: mark incompressible page as ZRAM_HUGE
  UPSTREAM: zram: correct flag name of ZRAM_ACCESS
  UPSTREAM: zram: Delete gendisk before cleaning up the request queue
  UPSTREAM: drivers/block/zram/zram_drv.c: make zram_page_end_io() static
  BACKPORT: zram: set BDI_CAP_STABLE_WRITES once
  UPSTREAM: zram: fix null dereference of handle
  UPSTREAM: zram: add config and doc file for writeback feature
  BACKPORT: zram: read page from backing device
  BACKPORT: zram: write incompressible pages to backing device
  BACKPORT: zram: identify asynchronous IO's return value
  BACKPORT: zram: add free space management in backing device
  UPSTREAM: zram: add interface to specif backing device
  UPSTREAM: zram: rename zram_decompress_page to __zram_bvec_read
  UPSTREAM: zram: inline zram_compress
  UPSTREAM: zram: clean up duplicated codes in __zram_bvec_write
  Linux 4.4.152
  reiserfs: fix broken xattr handling (heap corruption, bad retval)
  i2c: imx: Fix race condition in dma read
  PCI: pciehp: Fix use-after-free on unplug
  PCI: Skip MPS logic for Virtual Functions (VFs)
  PCI: hotplug: Don't leak pci_slot on registration failure
  parisc: Remove unnecessary barriers from spinlock.h
  bridge: Propagate vlan add failure to user
  packet: refine ring v3 block size test to hold one frame
  netfilter: conntrack: dccp: treat SYNC/SYNCACK as invalid if no prior state
  xfrm_user: prevent leaking 2 bytes of kernel memory
  parisc: Remove ordered stores from syscall.S
  ext4: fix spectre gadget in ext4_mb_regular_allocator()
  KVM: irqfd: fix race between EPOLLHUP and irq_bypass_register_consumer
  staging: android: ion: check for kref overflow
  tcp: identify cryptic messages as TCP seq # bugs
  net: qca_spi: Fix log level if probe fails
  net: qca_spi: Make sure the QCA7000 reset is triggered
  net: qca_spi: Avoid packet drop during initial sync
  net: usb: rtl8150: demote allmulti message to dev_dbg()
  net/ethernet/freescale/fman: fix cross-build error
  drm/nouveau/gem: off by one bugs in nouveau_gem_pushbuf_reloc_apply()
  tcp: remove DELAYED ACK events in DCTCP
  qlogic: check kstrtoul() for errors
  packet: reset network header if packet shorter than ll reserved space
  ixgbe: Be more careful when modifying MAC filters
  ARM: dts: am3517.dtsi: Disable reference to OMAP3 OTG controller
  ARM: 8780/1: ftrace: Only set kernel memory back to read-only after boot
  perf llvm-utils: Remove bashism from kernel include fetch script
  bnxt_en: Fix for system hang if request_irq fails
  drm/armada: fix colorkey mode property
  ieee802154: fakelb: switch from BUG_ON() to WARN_ON() on problem
  ieee802154: at86rf230: use __func__ macro for debug messages
  ieee802154: at86rf230: switch from BUG_ON() to WARN_ON() on problem
  ARM: pxa: irq: fix handling of ICMR registers in suspend/resume
  netfilter: x_tables: set module owner for icmp(6) matches
  smsc75xx: Add workaround for gigabit link up hardware errata.
  kasan: fix shadow_size calculation error in kasan_module_alloc
  tracing: Use __printf markup to silence compiler
  ARM: imx_v4_v5_defconfig: Select ULPI support
  ARM: imx_v6_v7_defconfig: Select ULPI support
  HID: wacom: Correct touch maximum XY of 2nd-gen Intuos
  m68k: fix "bad page state" oops on ColdFire boot
  bnx2x: Fix receiving tx-timeout in error or recovery state.
  drm/exynos: decon5433: Fix WINCONx reset value
  drm/exynos: decon5433: Fix per-plane global alpha for XRGB modes
  drm/exynos: gsc: Fix support for NV16/61, YUV420/YVU420 and YUV422 modes
  md/raid10: fix that replacement cannot complete recovery after reassemble
  dmaengine: k3dma: Off by one in k3_of_dma_simple_xlate()
  ARM: dts: da850: Fix interrups property for gpio
  selftests/x86/sigreturn/64: Fix spurious failures on AMD CPUs
  perf report powerpc: Fix crash if callchain is empty
  perf test session topology: Fix test on s390
  usb: xhci: increase CRS timeout value
  ARM: dts: am437x: make edt-ft5x06 a wakeup source
  brcmfmac: stop watchdog before detach and free everything
  cxgb4: when disabling dcb set txq dcb priority to 0
  Smack: Mark inode instant in smack_task_to_inode
  ipv6: mcast: fix unsolicited report interval after receiving querys
  locking/lockdep: Do not record IRQ state within lockdep code
  net: davinci_emac: match the mdio device against its compatible if possible
  ARC: Enable machine_desc->init_per_cpu for !CONFIG_SMP
  net: propagate dev_get_valid_name return code
  net: hamradio: use eth_broadcast_addr
  enic: initialize enic->rfs_h.lock in enic_probe
  qed: Add sanity check for SIMD fastpath handler.
  arm64: make secondary_start_kernel() notrace
  scsi: xen-scsifront: add error handling for xenbus_printf
  usb: gadget: dwc2: fix memory leak in gadget_init()
  usb: gadget: composite: fix delayed_status race condition when set_interface
  usb: dwc2: fix isoc split in transfer with no data
  ARM: dts: Cygnus: Fix I2C controller interrupt type
  selftests: sync: add config fragment for testing sync framework
  selftests: zram: return Kselftest Skip code for skipped tests
  selftests: user: return Kselftest Skip code for skipped tests
  selftests: static_keys: return Kselftest Skip code for skipped tests
  selftests: pstore: return Kselftest Skip code for skipped tests
  netfilter: ipv6: nf_defrag: reduce struct net memory waste
  ARC: Explicitly add -mmedium-calls to CFLAGS
  ANDROID: x86_64_cuttlefish_defconfig: Enable zram and zstd
  BACKPORT: crypto: zstd - Add zstd support
  UPSTREAM: zram: add zstd to the supported algorithms list
  UPSTREAM: lib: Add zstd modules
  UPSTREAM: lib: Add xxhash module
  UPSTREAM: zram: rework copy of compressor name in comp_algorithm_store()
  UPSTREAM: zram: constify attribute_group structures.
  UPSTREAM: zram: count same page write as page_stored
  UPSTREAM: zram: reduce load operation in page_same_filled
  UPSTREAM: zram: use zram_free_page instead of open-coded
  UPSTREAM: zram: introduce zram data accessor
  UPSTREAM: zram: remove zram_meta structure
  UPSTREAM: zram: use zram_slot_lock instead of raw bit_spin_lock op
  BACKPORT: zram: partial IO refactoring
  BACKPORT: zram: handle multiple pages attached bio's bvec
  UPSTREAM: zram: fix operator precedence to get offset
  BACKPORT: zram: extend zero pages to same element pages
  BACKPORT: zram: remove waitqueue for IO done
  UPSTREAM: zram: remove obsolete sysfs attrs
  UPSTREAM: zram: support BDI_CAP_STABLE_WRITES
  UPSTREAM: zram: revalidate disk under init_lock
  BACKPORT: mm: support anonymous stable page
  UPSTREAM: zram: use __GFP_MOVABLE for memory allocation
  UPSTREAM: zram: drop gfp_t from zcomp_strm_alloc()
  UPSTREAM: zram: add more compression algorithms
  UPSTREAM: zram: delete custom lzo/lz4
  UPSTREAM: zram: cosmetic: cleanup documentation
  UPSTREAM: zram: use crypto api to check alg availability
  BACKPORT: zram: switch to crypto compress API
  UPSTREAM: zram: rename zstrm find-release functions
  UPSTREAM: zram: introduce per-device debug_stat sysfs node
  UPSTREAM: zram: remove max_comp_streams internals
  UPSTREAM: zram: user per-cpu compression streams
  BACKPORT: zsmalloc: require GFP in zs_malloc()
  UPSTREAM: zram/zcomp: do not zero out zcomp private pages
  UPSTREAM: zram: pass gfp from zcomp frontend to backend
  UPSTREAM: socket: close race condition between sock_close() and sockfs_setattr()
  ANDROID: Refresh x86_64_cuttlefish_defconfig
  Linux 4.4.151
  isdn: Disable IIOCDBGVAR
  Bluetooth: avoid killing an already killed socket
  x86/mm: Simplify p[g4um]d_page() macros
  serial: 8250_dw: always set baud rate in dw8250_set_termios
  ACPI / PM: save NVS memory for ASUS 1025C laptop
  ACPI: save NVS memory for Lenovo G50-45
  USB: option: add support for DW5821e
  USB: serial: sierra: fix potential deadlock at close
  ALSA: vxpocket: Fix invalid endian conversions
  ALSA: memalloc: Don't exceed over the requested size
  ALSA: hda: Correct Asrock B85M-ITX power_save blacklist entry
  ALSA: cs5535audio: Fix invalid endian conversion
  ALSA: virmidi: Fix too long output trigger loop
  ALSA: vx222: Fix invalid endian conversions
  ALSA: hda - Turn CX8200 into D3 as well upon reboot
  ALSA: hda - Sleep for 10ms after entering D3 on Conexant codecs
  net_sched: fix NULL pointer dereference when delete tcindex filter
  vsock: split dwork to avoid reinitializations
  net_sched: Fix missing res info when create new tc_index filter
  llc: use refcount_inc_not_zero() for llc_sap_find()
  l2tp: use sk_dst_check() to avoid race on sk->sk_dst_cache
  dccp: fix undefined behavior with 'cwnd' shift in ccid2_cwnd_restart()

Conflicts:
	drivers/block/zram/zram_drv.c
	drivers/staging/android/ion/ion.c
	include/linux/swap.h
	mm/zsmalloc.c

Change-Id: I1c437ac5133503a939d06d51ec778b65371df6d1
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
#include <asm/edac.h>
#include <soc/qcom/minidump.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP,
	IPI_CPU_BACKTRACE,
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
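
/*
 * Completion used to synchronise CPU bring-up: __cpu_up() waits on it
 * (with a one second timeout), and the freshly booted secondary signals
 * it from secondary_start_kernel() once it has marked itself online.
 */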
static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
#ifdef CONFIG_THREAD_INFO_IN_TASK
	secondary_data.task = idle;
#endif
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

#ifdef CONFIG_THREAD_INFO_IN_TASK
	secondary_data.task = NULL;
#endif
	secondary_data.stack = NULL;

	return ret;
}
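
/*
 * Record this CPU's topology: store_cpu_topology() fills in the
 * cluster/core identifiers used by the scheduler and sysfs.
 */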
static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
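/*
 * Marked notrace: this runs before the new CPU's per-cpu state is fully
 * set up, where the function tracer must not instrument it (see the
 * 4.4.152 backport "arm64: make secondary_start_kernel() notrace").
 */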
asmlinkage notrace void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	smp_store_cpu_info(cpu);

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */

	asm volatile("mov	sp, %0\n"
		     "mov	x29, #0\n"
		     "b	secondary_start_kernel"
		     : : "r" (task_stack_page(current) + THREAD_START_SP));
}
#endif

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non-affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}
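
/*
 * Enumeration state shared by the DT and ACPI paths below: slot 0 of
 * cpu_logical_map is the boot CPU, so allocation of further logical
 * ids starts at 1.
 */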
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (i.e. a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif
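
/*
 * Hook used to raise an IPI on the underlying interrupt controller; it
 * is installed by the irqchip driver via set_smp_cross_call().
 * pending_ipi is set for every target CPU before an IPI is raised and
 * cleared at the end of handle_IPI(), so interested code can check
 * whether a CPU still has an IPI in flight before, for example,
 * entering a low-power state.
 */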
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
DEFINE_PER_CPU(bool, pending_ipi);

void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpumask)
		per_cpu(pending_ipi, cpu) = true;

	__smp_cross_call(cpumask, func);
}

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
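
/*
 * Like smp_cross_call_common(), but also emits the ipi_raise trace
 * event before handing the mask to the irqchip.
 */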
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	unsigned int cpu;

	for_each_cpu(cpu, target)
		per_cpu(pending_ipi, cpu) = true;

	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);
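
/*
 * Register state of each stopped CPU, captured in ipi_cpu_stop() so
 * that post-mortem tools (for example the minidump support included
 * above) can see where each CPU was when the system went down.
 */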
DEFINE_PER_CPU(struct pt_regs, regs_before_stop);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu, struct pt_regs *regs)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		per_cpu(regs_before_stop, cpu) = *regs;
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		show_regs(regs);
		dump_stack();
		dump_stack_minidump(regs->sp);
		arm64_check_cache_ecc(NULL);
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_active(cpu, false);

	flush_cache_all();
	local_irq_disable();

	while (1)
		cpu_relax();
}
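
/*
 * arch_trigger_all_cpu_backtrace() support: the initiating CPU fills
 * backtrace_mask with every other online CPU, raises IPI_CPU_BACKTRACE
 * and waits up to 10 seconds; each target dumps its registers and then
 * clears itself from the mask.
 */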
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

static void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in
		 * progress (backtrace_flag == 1), don't print duplicate
		 * CPU dumps.
		 */
		return;

	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	if (!cpumask_empty(&backtrace_mask))
		smp_cross_call_common(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
}

/*
 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
 */
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
{
	if (cpumask_test_cpu(cpu, &backtrace_mask)) {
		raw_spin_lock(&backtrace_lock);
		pr_warn("IPI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		raw_spin_unlock(&backtrace_lock);
		cpumask_clear_cpu(cpu, &backtrace_mask);
	}
}

#ifdef CONFIG_SMP
void arch_trigger_all_cpu_backtrace(void)
{
	smp_send_all_cpu_backtrace();
}
#else
void arch_trigger_all_cpu_backtrace(void)
{
	dump_stack();
}
#endif

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu, regs);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_CPU_BACKTRACE:
		ipi_cpu_backtrace(cpu, regs);
		break;

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);

	per_cpu(pending_ipi, cpu) = false;
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	BUG_ON(cpu_is_offline(cpu));
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif
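
/*
 * Stop all other CPUs, e.g. on panic or reboot: raise IPI_CPU_STOP and
 * give the targets up to one second to take themselves out of the
 * active mask before complaining.
 */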
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call_common(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_active_cpus() > 1 && timeout--)
		udelay(1);

	if (num_active_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}