Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 microcode loader updates from Ingo Molnar:
 "There are two main changes in this tree:

   - AMD microcode early loading fixes

   - some microcode loader source files reorganization"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, microcode: Move to a proper location
  x86, microcode, AMD: Fix early ucode loading
  x86, microcode: Share native MSR accessing variants
  x86, ramdisk: Export relocated ramdisk VA
commit 2bb2c5e235
15 changed files with 206 additions and 123 deletions
@@ -1065,10 +1065,6 @@ config MICROCODE_OLD_INTERFACE
 	def_bool y
 	depends on MICROCODE
 
-config MICROCODE_INTEL_LIB
-	def_bool y
-	depends on MICROCODE_INTEL
-
 config MICROCODE_INTEL_EARLY
 	def_bool n
 
@@ -1,6 +1,21 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#define native_rdmsr(msr, val1, val2)			\
+do {							\
+	u64 __val = native_read_msr((msr));		\
+	(void)((val1) = (u32)__val);			\
+	(void)((val2) = (u32)(__val >> 32));		\
+} while (0)
+
+#define native_wrmsr(msr, low, high)			\
+	native_write_msr(msr, low, high)
+
+#define native_wrmsrl(msr, val)				\
+	native_write_msr((msr),				\
+			 (u32)((u64)(val)),		\
+			 (u32)((u64)(val) >> 32))
+
 struct cpu_signature {
 	unsigned int sig;
 	unsigned int pf;
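The three native_* helpers added above only split or merge a 64-bit MSR value into the low/high 32-bit halves that RDMSR/WRMSR work with. A minimal user-space sketch of that split and merge follows; the MSR contents are faked with a plain 64-bit variable, since native_read_msr()/native_write_msr() are kernel-internal and execute the real instructions.

/*
 * User-space illustration only: a 64-bit variable stands in for the MSR.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msr = 0x8001011600001234ULL;	/* pretend MSR contents */
	uint32_t lo, hi;

	/* what native_rdmsr() does: split the value into EDX:EAX halves */
	lo = (uint32_t)msr;
	hi = (uint32_t)(msr >> 32);
	printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);

	/* what native_wrmsrl() does: rebuild the 64-bit value from the halves */
	uint64_t again = ((uint64_t)hi << 32) | lo;
	printf("reassembled=0x%016llx\n", (unsigned long long)again);

	return 0;
}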
@@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
 extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 
+#define PATCH_MAX_SIZE PAGE_SIZE
+extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
 #ifdef CONFIG_MICROCODE_AMD_EARLY
-#ifdef CONFIG_X86_32
-#define MPB_MAX_SIZE PAGE_SIZE
-extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
-#endif
 extern void __init load_ucode_amd_bsp(void);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
 
@@ -3,7 +3,6 @@
 
 #include <uapi/asm/setup.h>
 
-
 #define COMMAND_LINE_SIZE 2048
 
 #include <linux/linkage.h>
 
@@ -29,6 +28,8 @@
 #include <asm/bootparam.h>
 #include <asm/x86_init.h>
 
+extern u64 relocated_ramdisk;
+
 /* Interrupt control for vSMPowered x86_64 systems */
 #ifdef CONFIG_X86_64
 void vsmp_init(void);
 
@@ -92,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
 
-obj-$(CONFIG_MICROCODE_EARLY)		+= microcode_core_early.o
-obj-$(CONFIG_MICROCODE_INTEL_EARLY)	+= microcode_intel_early.o
-obj-$(CONFIG_MICROCODE_INTEL_LIB)	+= microcode_intel_lib.o
-microcode-y				:= microcode_core.o
-microcode-$(CONFIG_MICROCODE_INTEL)	+= microcode_intel.o
-microcode-$(CONFIG_MICROCODE_AMD)	+= microcode_amd.o
-obj-$(CONFIG_MICROCODE_AMD_EARLY)	+= microcode_amd_early.o
-obj-$(CONFIG_MICROCODE)			+= microcode.o
-
 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 
 obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o
 
@@ -42,6 +42,7 @@ endif
 
 obj-$(CONFIG_X86_MCE)		+= mcheck/
 obj-$(CONFIG_MTRR)		+= mtrr/
+obj-$(CONFIG_MICROCODE)		+= microcode/
 
 obj-$(CONFIG_X86_LOCAL_APIC)	+= perfctr-watchdog.o perf_event_amd_ibs.o
 
arch/x86/kernel/cpu/microcode/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
+microcode-y				:= core.o
+obj-$(CONFIG_MICROCODE)			+= microcode.o
+microcode-$(CONFIG_MICROCODE_INTEL)	+= intel.o intel_lib.o
+microcode-$(CONFIG_MICROCODE_AMD)	+= amd.o
+obj-$(CONFIG_MICROCODE_EARLY)		+= core_early.o
+obj-$(CONFIG_MICROCODE_INTEL_EARLY)	+= intel_early.o
+obj-$(CONFIG_MICROCODE_AMD_EARLY)	+= amd_early.o
 
@@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd)
 {
 	u32 rev, dummy;
 
-	wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
 
 	/* verify patch application was successful */
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 	if (rev != mc_amd->hdr.patch_id)
 		return -1;
 
@@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 	patch->patch_id  = mc_hdr->patch_id;
 	patch->equiv_cpu = proc_id;
 
+	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
+		 __func__, patch->patch_id, proc_id);
+
 	/* ... and add to cache. */
 	update_cache(patch);
 
@@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 	if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
 		struct ucode_patch *p = find_patch(smp_processor_id());
 		if (p) {
-			memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
-			memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
-							   MPB_MAX_SIZE));
+			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+							       PATCH_MAX_SIZE));
 		}
 	}
 #endif
 
@@ -2,6 +2,7 @@
  * Copyright (C) 2013 Advanced Micro Devices, Inc.
  *
  * Author: Jacob Shin <jacob.shin@amd.com>
+ * Fixes: Borislav Petkov <bp@suse.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 
@@ -15,10 +16,18 @@
 #include <asm/setup.h>
 #include <asm/microcode_amd.h>
 
-static bool ucode_loaded;
+/*
+ * This points to the current valid container of microcode patches which we will
+ * save from the initrd before jettisoning its contents.
+ */
+static u8 *container;
+static size_t container_size;
+
 static u32 ucode_new_rev;
-static unsigned long ucode_offset;
-static size_t ucode_size;
+u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u16 this_equiv_id;
 
+struct cpio_data ucode_cpio;
+
 /*
  * Microcode patch container file is prepended to the initrd in cpio format.
 
@@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void)
 	char *path;
 	void *start;
 	size_t size;
-	unsigned long *uoffset;
-	size_t *usize;
-	struct cpio_data cd;
 
 #ifdef CONFIG_X86_32
 	struct boot_params *p;
 
@@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void)
 	path  = (char *)__pa_nodebug(ucode_path);
 	start = (void *)p->hdr.ramdisk_image;
 	size  = p->hdr.ramdisk_size;
-	uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
-	usize   = (size_t *)__pa_nodebug(&ucode_size);
 #else
 	path  = ucode_path;
 	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
 	size  = boot_params.hdr.ramdisk_size;
-	uoffset = &ucode_offset;
-	usize   = &ucode_size;
 #endif
 
-	cd = find_cpio_data(path, start, size, &offset);
-	if (!cd.data)
-		return cd;
+	return find_cpio_data(path, start, size, &offset);
+}
 
-	if (*(u32 *)cd.data != UCODE_MAGIC) {
-		cd.data = NULL;
-		cd.size = 0;
-		return cd;
+static size_t compute_container_size(u8 *data, u32 total_size)
+{
+	size_t size = 0;
+	u32 *header = (u32 *)data;
+
+	if (header[0] != UCODE_MAGIC ||
+	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+	    header[2] == 0)                            /* size */
+		return size;
+
+	size = header[2] + CONTAINER_HDR_SZ;
+	total_size -= size;
+	data += size;
+
+	while (total_size) {
+		u16 patch_size;
+
+		header = (u32 *)data;
+
+		if (header[0] != UCODE_UCODE_TYPE)
+			break;
+
+		/*
+		 * Sanity-check patch size.
+		 */
+		patch_size = header[1];
+		if (patch_size > PATCH_MAX_SIZE)
+			break;
+
+		size       += patch_size + SECTION_HDR_SIZE;
+		data       += patch_size + SECTION_HDR_SIZE;
+		total_size -= patch_size + SECTION_HDR_SIZE;
 	}
 
-	*uoffset = (u8 *)cd.data - (u8 *)start;
-	*usize   = cd.size;
-
-	return cd;
+	return size;
 }
 
 /*
 
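The new compute_container_size() above walks one container: a 12-byte header (magic, type, size) followed by the equivalence table, then a run of patch sections each preceded by an 8-byte (type, size) header. Below is a stand-alone sketch of the same walk over a fabricated buffer; the constant values mirror the kernel's microcode_amd.h definitions but are reproduced here from memory, so treat them as illustrative rather than authoritative.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define UCODE_MAGIC			0x00414d44	/* assumed kernel values */
#define UCODE_EQUIV_CPU_TABLE_TYPE	0x00000000
#define UCODE_UCODE_TYPE		0x00000001
#define CONTAINER_HDR_SZ		12
#define SECTION_HDR_SIZE		8
#define PATCH_MAX_SIZE			4096

static size_t container_size(const uint8_t *data, uint32_t total)
{
	uint32_t hdr[3];
	size_t size;

	memcpy(hdr, data, sizeof(hdr));
	if (hdr[0] != UCODE_MAGIC || hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE || hdr[2] == 0)
		return 0;

	/* skip the equivalence table ... */
	size   = hdr[2] + CONTAINER_HDR_SZ;
	data  += size;
	total -= size;

	/* ... then add up every UCODE_UCODE_TYPE section that follows */
	while (total) {
		uint32_t sec[2];

		memcpy(sec, data, sizeof(sec));
		if (sec[0] != UCODE_UCODE_TYPE || sec[1] > PATCH_MAX_SIZE)
			break;

		size  += sec[1] + SECTION_HDR_SIZE;
		data  += sec[1] + SECTION_HDR_SIZE;
		total -= sec[1] + SECTION_HDR_SIZE;
	}
	return size;
}

int main(void)
{
	/* fake container: 16-byte equivalence table, one 32-byte patch section */
	uint8_t buf[CONTAINER_HDR_SZ + 16 + SECTION_HDR_SIZE + 32] = { 0 };
	uint32_t hdr[3] = { UCODE_MAGIC, UCODE_EQUIV_CPU_TABLE_TYPE, 16 };
	uint32_t sec[2] = { UCODE_UCODE_TYPE, 32 };

	memcpy(buf, hdr, sizeof(hdr));
	memcpy(buf + CONTAINER_HDR_SZ + 16, sec, sizeof(sec));

	printf("container spans %zu bytes\n", container_size(buf, sizeof(buf)));
	return 0;
}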
@@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void)
 static void apply_ucode_in_initrd(void *ucode, size_t size)
 {
 	struct equiv_cpu_entry *eq;
+	size_t *cont_sz;
 	u32 *header;
-	u8 *data;
+	u8 *data, **cont;
 	u16 eq_id = 0;
 	int offset, left;
-	u32 rev, eax;
+	u32 rev, eax, ebx, ecx, edx;
 	u32 *new_rev;
-	unsigned long *uoffset;
-	size_t *usize;
 
 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-	uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
-	usize   = (size_t *)__pa_nodebug(&ucode_size);
+	cont_sz = (size_t *)__pa_nodebug(&container_size);
+	cont    = (u8 **)__pa_nodebug(&container);
 #else
 	new_rev = &ucode_new_rev;
-	uoffset = &ucode_offset;
-	usize   = &ucode_size;
+	cont_sz = &container_size;
+	cont    = &container;
 #endif
 
 	data = ucode;
 
@@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
 	header = (u32 *)data;
 
 	/* find equiv cpu table */
-	if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+	if (header[0] != UCODE_MAGIC ||
+	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
 	    header[2] == 0)                            /* size */
 		return;
 
-	eax = cpuid_eax(0x00000001);
+	eax = 0x00000001;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
 
 	while (left > 0) {
 		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
 
+		*cont = data;
+
+		/* Advance past the container header */
 		offset = header[2] + CONTAINER_HDR_SZ;
 		data  += offset;
 		left  -= offset;
 
 		eq_id = find_equiv_id(eq, eax);
-		if (eq_id)
+		if (eq_id) {
+			this_equiv_id = eq_id;
+			*cont_sz = compute_container_size(*cont, left + offset);
+
+			/*
+			 * truncate how much we need to iterate over in the
+			 * ucode update loop below
+			 */
+			left = *cont_sz - offset;
 			break;
+		}
 
 		/*
 		 * support multiple container files appended together. if this
 
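The loop above matches the CPUID(1) signature against the equivalence table that sits right after the container header and remembers the matching ID in this_equiv_id. The lookup that find_equiv_id() performs is roughly the following sketch; the struct layout and the zero-terminated-table convention follow the kernel's AMD container definitions as I understand them, and the signatures and IDs in main() are made up.

#include <stdint.h>
#include <stdio.h>

struct equiv_cpu_entry {
	uint32_t installed_cpu;		/* raw CPUID(1) signature */
	uint32_t fixed_errata_mask;
	uint32_t fixed_errata_compare;
	uint16_t equiv_cpu;		/* ID used by the patch headers */
	uint16_t res;
};

static uint16_t find_equiv_id(const struct equiv_cpu_entry *table, uint32_t sig)
{
	for (unsigned int i = 0; table[i].installed_cpu; i++)
		if (table[i].installed_cpu == sig)
			return table[i].equiv_cpu;
	return 0;	/* no match: this container is not for this CPU */
}

int main(void)
{
	/* two made-up entries plus the all-zero terminator */
	struct equiv_cpu_entry table[] = {
		{ 0x00600f12, 0, 0, 0x6012, 0 },
		{ 0x00600f20, 0, 0, 0x6020, 0 },
		{ 0 },
	};

	printf("equiv id = 0x%04x\n", (unsigned)find_equiv_id(table, 0x00600f12));
	return 0;
}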
@@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
 
 		/* mark where the next microcode container file starts */
 		offset = data - (u8 *)ucode;
-		*uoffset += offset;
-		*usize   -= offset;
 		ucode = data;
 	}
 
 	if (!eq_id) {
-		*usize = 0;
+		*cont = NULL;
+		*cont_sz = 0;
 		return;
 	}
 
 	/* find ucode and update if needed */
 
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
 
 	while (left > 0) {
 		struct microcode_amd *mc;
 
@@ -168,73 +206,83 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
 			break;
 
 		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
-		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id)
-			if (__apply_microcode_amd(mc) == 0) {
+
+		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+
+			if (!__apply_microcode_amd(mc)) {
 				rev = mc->hdr.patch_id;
 				*new_rev = rev;
+
+				/* save ucode patch */
+				memcpy(amd_ucode_patch, mc,
+				       min_t(u32, header[1], PATCH_MAX_SIZE));
 			}
+		}
 
 		offset  = header[1] + SECTION_HDR_SIZE;
 		data   += offset;
 		left   -= offset;
 	}
-
-	/* mark where this microcode container file ends */
-	offset = *usize - (data - (u8 *)ucode);
-	*usize -= offset;
-
-	if (!(*new_rev))
-		*usize = 0;
 }
 
 void __init load_ucode_amd_bsp(void)
 {
-	struct cpio_data cd = find_ucode_in_initrd();
-	if (!cd.data)
+	struct cpio_data cp;
+	void **data;
+	size_t *size;
+
+#ifdef CONFIG_X86_32
+	data = (void **)__pa_nodebug(&ucode_cpio.data);
+	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+#else
+	data = &ucode_cpio.data;
+	size = &ucode_cpio.size;
+#endif
+
+	cp = find_ucode_in_initrd();
+	if (!cp.data)
 		return;
 
-	apply_ucode_in_initrd(cd.data, cd.size);
+	*data = cp.data;
+	*size = cp.size;
+
+	apply_ucode_in_initrd(cp.data, cp.size);
 }
 
 #ifdef CONFIG_X86_32
-u8 amd_bsp_mpb[MPB_MAX_SIZE];
-
 /*
  * On 32-bit, since AP's early load occurs before paging is turned on, we
  * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
  * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
- * is used upon resume from suspend.
+ * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * which is used upon resume from suspend.
  */
 void load_ucode_amd_ap(void)
 {
 	struct microcode_amd *mc;
-	unsigned long *initrd;
-	unsigned long *uoffset;
 	size_t *usize;
-	void *ucode;
+	void **ucode;
 
-	mc = (struct microcode_amd *)__pa(amd_bsp_mpb);
+	mc = (struct microcode_amd *)__pa(amd_ucode_patch);
 	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
 		__apply_microcode_amd(mc);
 		return;
 	}
 
-	initrd  = (unsigned long *)__pa(&initrd_start);
-	uoffset = (unsigned long *)__pa(&ucode_offset);
-	usize   = (size_t *)__pa(&ucode_size);
+	ucode = (void *)__pa_nodebug(&container);
+	usize = (size_t *)__pa_nodebug(&container_size);
 
-	if (!*usize || !*initrd)
+	if (!*ucode || !*usize)
 		return;
 
-	ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset);
-	apply_ucode_in_initrd(ucode, *usize);
+	apply_ucode_in_initrd(*ucode, *usize);
 }
 
 static void __init collect_cpu_sig_on_bsp(void *arg)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
 
@@ -242,36 +290,54 @@ void load_ucode_amd_ap(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	struct equiv_cpu_entry *eq;
+	struct microcode_amd *mc;
 	u32 rev, eax;
+	u16 eq_id;
+
+	/* Exit if called on the BSP. */
+	if (!cpu)
+		return;
+
+	if (!container)
+		return;
 
 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-	eax = cpuid_eax(0x00000001);
 
 	uci->cpu_sig.rev = rev;
 	uci->cpu_sig.sig = eax;
 
-	if (cpu && !ucode_loaded) {
-		void *ucode;
-
-		if (!ucode_size || !initrd_start)
-			return;
-
-		ucode = (void *)(initrd_start + ucode_offset);
-		eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-		if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
-			return;
-
-		ucode_loaded = true;
+	eax = cpuid_eax(0x00000001);
+	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+
+	eq_id = find_equiv_id(eq, eax);
+	if (!eq_id)
+		return;
+
+	if (eq_id == this_equiv_id) {
+		mc = (struct microcode_amd *)amd_ucode_patch;
+
+		if (mc && rev < mc->hdr.patch_id) {
+			if (!__apply_microcode_amd(mc))
+				ucode_new_rev = mc->hdr.patch_id;
+		}
+
+	} else {
+		if (!ucode_cpio.data)
+			return;
+
+		/*
+		 * AP has a different equivalence ID than BSP, looks like
+		 * mixed-steppings silicon so go through the ucode blob anew.
+		 */
+		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
 	}
-
-	apply_microcode_amd(cpu);
 }
 #endif
 
 int __init save_microcode_in_initrd_amd(void)
 {
 	enum ucode_state ret;
-	void *ucode;
 	u32 eax;
 
 #ifdef CONFIG_X86_32
 
@@ -280,22 +346,35 @@ int __init save_microcode_in_initrd_amd(void)
 
 	if (!uci->cpu_sig.sig)
 		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+
+	/*
+	 * Take into account the fact that the ramdisk might get relocated
+	 * and therefore we need to recompute the container's position in
+	 * virtual memory space.
+	 */
+	container = (u8 *)(__va((u32)relocated_ramdisk) +
+			   ((u32)container - boot_params.hdr.ramdisk_image));
 #endif
 	if (ucode_new_rev)
 		pr_info("microcode: updated early to new patch_level=0x%08x\n",
 			ucode_new_rev);
 
-	if (ucode_loaded || !ucode_size || !initrd_start)
-		return 0;
+	if (!container)
+		return -EINVAL;
 
-	ucode = (void *)(initrd_start + ucode_offset);
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 
-	ret = load_microcode_amd(eax, ucode, ucode_size);
+	ret = load_microcode_amd(eax, container, container_size);
 	if (ret != UCODE_OK)
 		return -EINVAL;
 
-	ucode_loaded = true;
+	/*
+	 * This will be freed any msec now, stash patches for the current
+	 * family and switch to patch cache for cpu hotplug, etc later.
+	 */
+	container = NULL;
+	container_size = 0;
+
 	return 0;
 }
 
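The family value handed to load_microcode_amd() above is computed from the CPUID(1) signature as base family (bits 11:8) plus extended family (bits 27:20). A tiny worked example with a made-up family-15h signature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eax = 0x00600f12;	/* hypothetical CPUID(1) EAX */
	uint32_t family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	/* 0xf (base) + 0x6 (extended) = 0x15, i.e. family 15h */
	printf("family = 0x%x\n", (unsigned)family);
	return 0;
}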
@@ -365,16 +365,6 @@ out:
 	return state;
 }
 
-#define native_rdmsr(msr, val1, val2)			\
-do {							\
-	u64 __val = native_read_msr((msr));		\
-	(void)((val1) = (u32)__val);			\
-	(void)((val2) = (u32)(__val >> 32));		\
-} while (0)
-
-#define native_wrmsr(msr, low, high)			\
-	native_write_msr(msr, low, high);
-
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
 
@@ -295,6 +295,8 @@ static void __init reserve_brk(void)
 	_brk_start = 0;
 }
 
+u64 relocated_ramdisk;
+
 #ifdef CONFIG_BLK_DEV_INITRD
 
 static u64 __init get_ramdisk_image(void)
 
@@ -321,25 +323,24 @@ static void __init relocate_initrd(void)
 	u64 ramdisk_image = get_ramdisk_image();
 	u64 ramdisk_size  = get_ramdisk_size();
 	u64 area_size     = PAGE_ALIGN(ramdisk_size);
-	u64 ramdisk_here;
 	unsigned long slop, clen, mapaddr;
 	char *p, *q;
 
 	/* We need to move the initrd down into directly mapped mem */
-	ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
+	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 						 area_size, PAGE_SIZE);
 
-	if (!ramdisk_here)
+	if (!relocated_ramdisk)
 		panic("Cannot find place for new RAMDISK of size %lld\n",
 		      ramdisk_size);
 
 	/* Note: this includes all the mem currently occupied by
 	   the initrd, we rely on that fact to keep the data intact. */
-	memblock_reserve(ramdisk_here, area_size);
-	initrd_start = ramdisk_here + PAGE_OFFSET;
+	memblock_reserve(relocated_ramdisk, area_size);
+	initrd_start = relocated_ramdisk + PAGE_OFFSET;
 	initrd_end   = initrd_start + ramdisk_size;
 	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
-	       ramdisk_here, ramdisk_here + ramdisk_size - 1);
+	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
 
 	q = (char *)initrd_start;
 
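relocate_initrd() now records the ramdisk's new physical location in the exported relocated_ramdisk, which is what lets save_microcode_in_initrd_amd() earlier rebase its container pointer via __va(relocated_ramdisk) + (container - ramdisk_image). The arithmetic is plain offset preservation across a copy; here is a small user-space sketch, with heap buffers standing in for the old and new ramdisk locations:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *old_base = calloc(1, 64);
	char *new_base = calloc(1, 64);

	strcpy(old_base, "xxxxcontainer");	/* "container" starts at offset 4 */
	char *container = old_base + 4;

	memcpy(new_base, old_base, 64);		/* the "relocation" */

	/* same idea as: __va(relocated_ramdisk) + (container - ramdisk_image) */
	container = new_base + (container - old_base);

	printf("%s\n", container);		/* still prints "container" */
	free(old_base);
	free(new_base);
	return 0;
}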
@@ -363,7 +364,7 @@ static void __init relocate_initrd(void)
 	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
 		" [mem %#010llx-%#010llx]\n",
 		ramdisk_image, ramdisk_image + ramdisk_size - 1,
-		ramdisk_here, ramdisk_here + ramdisk_size - 1);
+		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
 }
 
 static void __init early_reserve_initrd(void)