commit 772439717dbf703b39990be58d8d4e3e4ad0598a upstream.

Intel CPUs expose methods to:

 - Detect whether the RDS capability is available via CPUID.7.0.EDX[31],

 - The SPEC_CTRL MSR (0x48), where setting bit 2 enables RDS.

 - MSR_IA32_ARCH_CAPABILITIES, where bit 4 set means there is no need to
   enable RDS.

With that in mind, if spec_store_bypass_disable=[auto,on] is selected, set
the SPEC_CTRL MSR at boot time to enable RDS if the platform requires it.

Note that this does not fix the KVM case where the SPEC_CTRL MSR is exposed
to guests, which can muck with it; see the patch titled:
 KVM/SVM/VMX/x86/spectre_v2: Support the combination of guest and host IBRS.

And for the firmware (IBRS to be set), see the patch titled:
 x86/spectre_v2: Read SPEC_CTRL MSR during boot and re-use reserved bits

[ tglx: Disentangled it from the intel implementation and kept the call order ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Srivatsa S. Bhat <srivatsa@csail.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@gmail.com>
Reviewed-by: Alexey Makhalov <amakhalov@vmware.com>
Reviewed-by: Bo Gan <ganb@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
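The commit message above names a CPUID bit and two MSRs. As a minimal
user-space sketch (not the kernel code from this patch), the checks can be
exercised like this: CPUID.7.0.EDX[31] for the RDS/SSBD capability, bit 4 of
MSR_IA32_ARCH_CAPABILITIES (0x10a) for "RDS not needed", and bit 2 of the
SPEC_CTRL MSR (0x48) for the current enable state. The rdmsr_cpu0() helper
and the macro names below are illustrative, not kernel interfaces; the sketch
only reads the MSRs and assumes root plus the msr module (/dev/cpu/0/msr).

/*
 * Diagnostic sketch of the RDS-related bits described in the commit message.
 * Read-only: it does not set SPEC_CTRL bit 2 the way the kernel does at boot.
 */
#include <cpuid.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_SPEC_CTRL		0x48
#define MSR_IA32_ARCH_CAPABILITIES	0x10a
#define SPEC_CTRL_RDS			(1 << 2)	/* bit 2: RDS enabled */
#define ARCH_CAP_RDS_NO			(1 << 4)	/* bit 4: RDS not needed */

static int rdmsr_cpu0(uint32_t msr, uint64_t *val)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return -1;
	/* The msr character device uses the MSR number as the file offset. */
	if (pread(fd, val, sizeof(*val), msr) != (ssize_t)sizeof(*val)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t v;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("CPUID.7.0.EDX[31] (RDS/SSBD supported): %s\n",
	       (edx & (1u << 31)) ? "yes" : "no");

	/* EDX bit 29 advertises the ARCH_CAPABILITIES MSR itself. */
	if ((edx & (1u << 29)) && !rdmsr_cpu0(MSR_IA32_ARCH_CAPABILITIES, &v))
		printf("ARCH_CAPABILITIES bit 4 (RDS not needed): %s\n",
		       (v & ARCH_CAP_RDS_NO) ? "yes" : "no");

	if (!rdmsr_cpu0(MSR_IA32_SPEC_CTRL, &v))
		printf("SPEC_CTRL bit 2 (RDS enabled): %s\n",
		       (v & SPEC_CTRL_RDS) ? "yes" : "no");

	return 0;
}

Below is the header file touched by this backport, arch/x86/kernel/cpu/cpu.h,
which gains the x86_spec_ctrl_setup_ap() declaration.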
#ifndef ARCH_X86_CPU_H
#define ARCH_X86_CPU_H

/* attempt to consolidate cpu attributes */
struct cpu_dev {
	const char	*c_vendor;

	/* some have two possibilities for cpuid string */
	const char	*c_ident[2];

	void		(*c_early_init)(struct cpuinfo_x86 *);
	void		(*c_bsp_init)(struct cpuinfo_x86 *);
	void		(*c_init)(struct cpuinfo_x86 *);
	void		(*c_identify)(struct cpuinfo_x86 *);
	void		(*c_detect_tlb)(struct cpuinfo_x86 *);
	void		(*c_bsp_resume)(struct cpuinfo_x86 *);
	int		c_x86_vendor;
#ifdef CONFIG_X86_32
	/* Optional vendor specific routine to obtain the cache size. */
	unsigned int	(*legacy_cache_size)(struct cpuinfo_x86 *,
					     unsigned int);

	/* Family/stepping-based lookup table for model names. */
	struct legacy_cpu_model_info {
		int		family;
		const char	*model_names[16];
	}		legacy_models[5];
#endif
};

struct _tlb_table {
	unsigned char descriptor;
	char tlb_type;
	unsigned int entries;
	/* unsigned int ways; */
	char info[128];
};

#define cpu_dev_register(cpu_devX) \
	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
	__attribute__((__section__(".x86_cpu_dev.init"))) = \
	&cpu_devX;

extern const struct cpu_dev *const __x86_cpu_dev_start[],
			    *const __x86_cpu_dev_end[];

extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);

extern void x86_spec_ctrl_setup_ap(void);

#endif /* ARCH_X86_CPU_H */
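For orientation, a hypothetical sketch of how a vendor file consumes this
header: it fills in a struct cpu_dev and places a pointer to it in the
.x86_cpu_dev.init section via cpu_dev_register(), and the boot code walks the
__x86_cpu_dev_start..__x86_cpu_dev_end range to match c_ident[] against the
CPUID vendor string. The "Example" vendor, its callbacks, and the c_x86_vendor
value below are made up for illustration; the real users are files such as
intel.c and amd.c.

#include "cpu.h"

static void example_early_init(struct cpuinfo_x86 *c)
{
	/* Early, per-CPU feature fixups would go here. */
}

static void example_init(struct cpuinfo_x86 *c)
{
	/* Full identification and feature setup would go here. */
}

static const struct cpu_dev example_cpu_dev = {
	.c_vendor	= "Example",
	.c_ident	= { "GenuineExample" },
	.c_early_init	= example_early_init,
	.c_init		= example_init,
	.c_x86_vendor	= 0,	/* would be an X86_VENDOR_* constant */
};

cpu_dev_register(example_cpu_dev);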