/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

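/*
 * Illustrative sketch (not part of this header) of how an architecture
 * might register its CPUs at boot; the "example_" names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu, example_cpu_devices);
 *
 *	static int __init example_topology_init(void)
 *	{
 *		int i;
 *
 *		for_each_present_cpu(i) {
 *			struct cpu *c = &per_cpu(example_cpu_devices, i);
 *
 *			c->hotpluggable = 1;
 *			register_cpu(c, i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(example_topology_init);
 */
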
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);

extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

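/*
 * These back the files in /sys/devices/system/cpu/vulnerabilities/; each
 * prints the mitigation status of one hardware bug for this kernel.
 */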
extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);

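/*
 * Usage sketch (illustrative): callers such as drivers/base/cacheinfo.c
 * pass a printf-style name for the new sub-device under the CPU, e.g.
 * cpu_device_create(parent, NULL, NULL, "cache").
 */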
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;

/*
 * CPU notifier priorities.
 */
enum {
	/*
	 * SCHED_ACTIVE marks a cpu which is coming up active during
	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
	 * cpu_active mask right after SCHED_ACTIVE.  During
	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
	 * ordered in the similar way.
	 *
	 * This ordering guarantees consistent cpu_active mask and
	 * migration behavior to all cpu notifiers.
	 */
	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,

	/* migration should happen before other stuff but after perf */
	CPU_PRI_PERF		= 20,
	CPU_PRI_MIGRATION	= 10,
	CPU_PRI_SMPBOOT		= 9,
	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */
#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
					* idle loop. */
#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

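/*
 * During suspend/resume, notifier callbacks see the _FROZEN variants of
 * the events above; callbacks that do not care about the distinction
 * typically mask the bit off first:
 *
 *	switch (action & ~CPU_TASKS_FROZEN) { ... }
 */
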
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}

extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);

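/*
 * Minimal usage sketch (illustrative; the "example_" names are
 * hypothetical):
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu%u is up\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu%u is gone\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	cpu_notifier(example_cpu_callback, 0);
 */
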
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done

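/*
 * Typical registration pattern (see Documentation/cpu-hotplug.txt) that
 * avoids racing with hotplug while doing per-cpu setup; example_nb and
 * example_setup_cpu() are hypothetical:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		example_setup_cpu(cpu);
 *	__register_cpu_notifier(&example_nb);
 *	cpu_notifier_register_done();
 */
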
#else	/* CONFIG_SMP */

#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

static inline void smpboot_thread_init(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */

extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
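/*
 * Illustrative use of the reference-counted hotplug lock above: hold it
 * while the set of online CPUs must not change (example_init_cpu() is a
 * hypothetical helper):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		example_init_cpu(cpu);
 *	put_online_cpus();
 */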
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else		/* CONFIG_HOTPLUG_CPU */

static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()	do { } while (0)
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif		/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */

enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_ONLINE,
};

void cpu_startup_entry(enum cpuhp_state state);

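/*
 * cpu_startup_entry() is the arch-independent tail of CPU bringup: the
 * boot CPU enters it from rest_init() and secondary CPUs from their arch
 * startup path (both passing CPUHP_ONLINE in this kernel), after which
 * the CPU runs its idle loop.
 */
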
void cpu_idle_poll_ctrl(bool enable);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

DECLARE_PER_CPU(bool, cpu_dead_idle);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif	/* #ifdef CONFIG_HOTPLUG_CPU */

#define IDLE_START 1
#define IDLE_END 2

void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);

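/*
 * The idle notifiers above are an Android-specific extension (not in
 * mainline): the chain is called with IDLE_START as a CPU enters the idle
 * loop and IDLE_END as it leaves it.  Sketch of a hypothetical listener:
 *
 *	static int example_idle_notifier(struct notifier_block *nb,
 *					 unsigned long val, void *data)
 *	{
 *		if (val == IDLE_START)
 *			example_quiesce();
 *		else if (val == IDLE_END)
 *			example_resume();
 *		return NOTIFY_OK;
 *	}
 */
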
#endif /* _LINUX_CPU_H_ */