android_kernel_oneplus_msm8998/include/linux/cpu.h
Srinivasarao P facb909e66 Merge android-4.4.144 (4b2d6ba) into msm-4.4
* refs/heads/tmp-4b2d6ba
  Linux 4.4.144
  ubi: fastmap: Erase outdated anchor PEBs during attach
  ubi: Fix Fastmap's update_vol()
  ubi: Fix races around ubi_refill_pools()
  ubi: Be more paranoid while seaching for the most recent Fastmap
  ubi: Rework Fastmap attach base code
  ubi: Introduce vol_ignored()
  clk: tegra: Fix PLL_U post divider and initial rate on Tegra30
  block: do not use interruptible wait anywhere
  x86/cpu: Re-apply forced caps every time CPU caps are re-read
  x86/xen: Add call of speculative_store_bypass_ht_init() to PV paths
  x86/bugs: Rename SSBD_NO to SSB_NO
  x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG
  x86/bugs: Rework spec_ctrl base and mask logic
  x86/bugs: Remove x86_spec_ctrl_set()
  x86/bugs: Expose x86_spec_ctrl_base directly
  x86/bugs: Unify x86_spec_ctrl_{set_guest, restore_host}
  x86/speculation: Rework speculative_store_bypass_update()
  x86/speculation: Add virtualized speculative store bypass disable support
  x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
  x86/speculation: Handle HT correctly on AMD
  x86/cpufeatures: Add FEATURE_ZEN
  x86/cpu/AMD: Fix erratum 1076 (CPB bit)
  x86/cpufeatures: Disentangle SSBD enumeration
  x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
  x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
  x86/cpu: Make alternative_msr_write work for 32-bit code
  x86/bugs: Fix the parameters alignment and missing void
  x86/bugs: Make cpu_show_common() static
  x86/bugs: Fix __ssb_select_mitigation() return type
  Documentation/spec_ctrl: Do some minor cleanups
  proc: Use underscores for SSBD in 'status'
  x86/bugs: Rename _RDS to _SSBD
  x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
  seccomp: Move speculation migitation control to arch code
  seccomp: Add filter flag to opt-out of SSB mitigation
  seccomp: Use PR_SPEC_FORCE_DISABLE
  prctl: Add force disable speculation
  seccomp: Enable speculation flaw mitigations
  proc: Provide details on speculation flaw mitigations
  nospec: Allow getting/setting on non-current task
  x86/speculation: Add prctl for Speculative Store Bypass mitigation
  x86/process: Allow runtime control of Speculative Store Bypass
  x86/process: Optimize TIF_NOTSC switch
  x86/process: Correct and optimize TIF_BLOCKSTEP switch
  x86/process: Optimize TIF checks in __switch_to_xtra()
  prctl: Add speculation control prctls
  x86/speculation: Create spec-ctrl.h to avoid include hell
  x86/bugs/AMD: Add support to disable RDS on Fam[15, 16, 17]h if requested
  x86/bugs: Whitelist allowed SPEC_CTRL MSR values
  x86/bugs/intel: Set proper CPU features and setup RDS
  x86/bugs: Provide boot parameters for the spec_store_bypass_disable mitigation
  x86/cpufeatures: Add X86_FEATURE_RDS
  x86/bugs: Expose /sys/../spec_store_bypass
  x86/cpu/intel: Add Knights Mill to Intel family
  x86/cpu: Rename Merrifield2 to Moorefield
  x86/bugs, KVM: Support the combination of guest and host IBRS
  x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
  x86/bugs: Concentrate bug reporting into a separate function
  x86/bugs: Concentrate bug detection into a separate function
  x86/nospec: Simplify alternative_msr_write()
  x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen
  xen: set cpu capabilities from xen_start_kernel()
  selftest/seccomp: Fix the seccomp(2) signature
  selftest/seccomp: Fix the flag name SECCOMP_FILTER_FLAG_TSYNC
  x86/speculation: Remove Skylake C2 from Speculation Control microcode blacklist
  x86/speculation: Move firmware_restrict_branch_speculation_*() from C to CPP
  x86/speculation: Use IBRS if available before calling into firmware
  x86/spectre_v2: Don't check microcode versions when running under hypervisors
  x86/speculation: Use Indirect Branch Prediction Barrier in context switch
  x86/mm: Give each mm TLB flush generation a unique ID
  x86/mm: Factor out LDT init from context init
  x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend
  x86/speculation: Add <asm/msr-index.h> dependency
  x86/speculation: Fix up array_index_nospec_mask() asm constraint
  x86/speculation: Clean up various Spectre related details
  x86/speculation: Correct Speculation Control microcode blacklist again
  x86/speculation: Update Speculation Control microcode blacklist
  x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface
  x86/asm/entry/32: Simplify pushes of zeroed pt_regs->REGs
  x86/pti: Mark constant arrays as __initconst
  x86/cpuid: Fix up "virtual" IBRS/IBPB/STIBP feature bits on Intel
  x86/cpufeatures: Clean up Spectre v2 related CPUID flags
  x86/speculation: Add basic IBPB (Indirect Branch Prediction Barrier) support
  x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes
  x86/pti: Do not enable PTI on CPUs which are not vulnerable to Meltdown
  x86/msr: Add definitions for new speculation control MSRs
  x86/cpufeatures: Add AMD feature bits for Speculation Control
  x86/cpufeatures: Add Intel feature bits for Speculation Control
  x86/cpufeatures: Add CPUID_7_EDX CPUID leaf
  x86/paravirt: Make native_save_fl() extern inline
  xhci: Fix perceived dead host due to runtime suspend race with event handler
  skbuff: Unconditionally copy pfmemalloc in __skb_clone()
  net: Don't copy pfmemalloc flag in __copy_skb_header()
  tg3: Add higher cpu clock for 5762.
  ptp: fix missing break in switch
  net: phy: fix flag masking in __set_phy_supported
  net/ipv4: Set oif in fib_compute_spec_dst
  lib/rhashtable: consider param->min_size when setting initial table size
  ipv6: fix useless rol32 call on hash
  ipv4: Return EINVAL when ping_group_range sysctl doesn't map to user ns
  mm: memcg: fix use after free in mem_cgroup_iter()
  ARC: mm: allow mprotect to make stack mappings executable
  ARC: Fix CONFIG_SWAP
  ALSA: rawmidi: Change resized buffers atomically
  fat: fix memory allocation failure handling of match_strdup()
  x86/MCE: Remove min interval polling limitation
  KVM/Eventfd: Avoid crash when assign and deassign specific eventfd in parallel.

Conflicts:
	drivers/mtd/ubi/wl.c
	sound/core/rawmidi.c

Change-Id: I277fe9260a764e7923ddc90e7327d9aa5865a038
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
2018-08-03 17:05:13 +05:30


/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};
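
/*
 * Arch code usually embeds 'struct cpu' in per-CPU data and registers it
 * at boot. A minimal sketch (not from this header; 'cpu_devices' is a
 * hypothetical name):
 *
 *	static DEFINE_PER_CPU(struct cpu, cpu_devices);
 *
 *	for_each_possible_cpu(i)
 *		register_cpu(&per_cpu(cpu_devices, i), i);
 */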

struct cpu_pstate_pwr {
	unsigned int freq;
	uint32_t power;
};

struct cpu_pwr_stats {
	int cpu;
	long temp;
	struct cpu_pstate_pwr *ptable;
	bool throttling;
	int len;
};
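
/*
 * Note: cpu_pstate_pwr and cpu_pwr_stats are vendor (msm-4.4) additions
 * rather than upstream 4.4 API; consumers obtain the per-CPU power table
 * via get_cpu_pwr_stats(), declared further below.
 */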

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);

extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);

extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
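
/*
 * The cpu_show_*() hooks above back the files under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1,
 * spectre_v2, spec_store_bypass); arch code overrides the weak defaults
 * in drivers/base/cpu.c to report its mitigation state.
 */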

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif

struct notifier_block;

/*
 * CPU notifier priorities.
 */
enum {
	/*
	 * SCHED_ACTIVE marks a cpu which is coming up active during
	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
	 * cpu_active mask right after SCHED_ACTIVE.  During
	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
	 * ordered in the similar way.
	 *
	 * This ordering guarantees consistent cpu_active mask and
	 * migration behavior to all cpu notifiers.
	 */
	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,

	/* migration should happen before other stuff but after perf */
	CPU_PRI_PERF		= 20,
	CPU_PRI_MIGRATION	= 10,
	CPU_PRI_SMPBOOT		= 9,

	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */
#define CPU_DYING_IDLE		0x000B /* CPU (unsigned)v dying, reached
					* idle loop. */
#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)
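
/*
 * Callbacks that don't distinguish the frozen variants typically mask the
 * bit off before switching on the event, e.g. (sketch):
 *
 *	switch (action & ~CPU_TASKS_FROZEN) {
 *	case CPU_ONLINE:
 *		...
 *	}
 */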

#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}

extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
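
/*
 * Example (a sketch, not part of the original header): wiring up a hotplug
 * callback from init code via cpu_notifier(). foo_online() is a
 * hypothetical helper.
 *
 *	static int foo_callback(struct notifier_block *nb,
 *				unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 *			foo_online(cpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	cpu_notifier(foo_callback, 0);
 */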

#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done

#else	/* CONFIG_SMP */

#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

static inline void smpboot_thread_init(void)
{
}

#endif /* CONFIG_SMP */

extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void cpu_hotplug_mutex_held(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
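
/*
 * Typical read-side pattern (sketch): hold the hotplug lock while walking
 * the online mask so CPUs cannot come or go underneath:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		...;
 *	put_online_cpus();
 */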

#define hotcpu_notifier(fn, pri)		cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)		__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)		register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)		__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)		unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)

void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else	/* CONFIG_HOTPLUG_CPU */

static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()	do { } while (0)
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define cpu_hotplug_mutex_held()	do { } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif	/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */

struct cpu_pwr_stats *get_cpu_pwr_stats(void);
void trigger_cpu_pwr_stats_calc(void);

enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_ONLINE,
};

void cpu_startup_entry(enum cpuhp_state state);

void cpu_idle_poll_ctrl(bool enable);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

DECLARE_PER_CPU(bool, cpu_dead_idle);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#define IDLE_START	1
#define IDLE_END	2

void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);
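
/*
 * The idle notifier and the IDLE_START/IDLE_END values above are
 * Android/vendor additions (not upstream 4.4 API); the chain is invoked
 * around entry to and exit from the CPU idle loop.
 */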
#endif /* _LINUX_CPU_H_ */