Merge branches 'x86-build-for-linus', 'x86-cleanups-for-linus' and 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 build/cleanup/debug updates from Ingo Molnar:
 "Robustify the build process with a quirk to avoid GCC reordering
  related bugs.

  Two code cleanups.

  Simplify entry_64.S CFI annotations, by Jan Beulich"

* 'x86-build-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, build: Change code16gcc.h from a C header to an assembly header

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Simplify __HAVE_ARCH_CMPXCHG tests
  x86/tsc: Get rid of custom DIV_ROUND() macro

* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/debug: Drop several unnecessary CFI annotations
commit 19d402c1e7
10 changed files with 47 additions and 75 deletions
arch/x86/Makefile
@@ -15,12 +15,9 @@ endif
 # that way we can complain to the user if the CPU is insufficient.
 #
 # The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
-# older versions of GCC, we need to play evil and unreliable tricks to
-# attempt to ensure that our asm(".code16gcc") is first in the asm
-# output.
-CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \
-		   $(call cc-option, -fno-toplevel-reorder,\
-		   $(call cc-option, -fno-unit-at-a-time))
+# older versions of GCC, include an *assembly* header to make sure that
+# gcc doesn't play any games behind our back.
+CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
 M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
 
 REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
arch/x86/boot/code16gcc.h
@@ -1,15 +1,11 @@
-/*
- * code16gcc.h
- *
- * This file is -include'd when compiling 16-bit C code.
- * Note: this asm() needs to be emitted before gcc emits any code.
- * Depending on gcc version, this requires -fno-unit-at-a-time or
- * -fno-toplevel-reorder.
- *
- * Hopefully gcc will eventually have a real -m16 option so we can
- * drop this hack long term.
- */
-
-#ifndef __ASSEMBLY__
-asm(".code16gcc");
-#endif
+#
+# code16gcc.h
+#
+# This file is added to the assembler via -Wa when compiling 16-bit C code.
+# This is done this way instead via asm() to make sure gcc does not reorder
+# things around us.
+#
+# gcc 4.9+ has a real -m16 option so we can drop this hack long term.
+#
+
+.code16gcc
arch/x86/include/asm/cmpxchg.h
@@ -4,6 +4,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
+#define __HAVE_ARCH_CMPXCHG 1
+
 /*
  * Non-existant functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().
@@ -143,7 +145,6 @@ extern void __add_wrong_size(void)
 # include <asm/cmpxchg_64.h>
 #endif
 
-#ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new) \
 	__cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
@@ -152,7 +153,6 @@ extern void __add_wrong_size(void)
 
 #define cmpxchg_local(ptr, old, new) \
 	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
-#endif
 
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
arch/x86/include/asm/cmpxchg_32.h
@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
 		    : "memory");
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n) \
 	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
arch/x86/include/asm/cmpxchg_64.h
@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 	*ptr = val;
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg64(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch/x86/include/asm/mc146818rtc.h
@@ -13,7 +13,7 @@
 #define RTC_ALWAYS_BCD	1	/* RTC operates in binary mode */
 #endif
 
-#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG)
+#if defined(CONFIG_X86_32)
 /*
  * This lock provides nmi access to the CMOS/RTC registers. It has some
  * special properties. It is owned by a CPU and stores the index register
arch/x86/include/asm/mutex_32.h
@@ -100,23 +100,11 @@ do { \
 static inline int __mutex_fastpath_trylock(atomic_t *count,
 					   int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
+	/* cmpxchg because it never induces a false contention state. */
 	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
+
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif /* _ASM_X86_MUTEX_32_H */
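The new one-line comment above compresses the old rationale: a cmpxchg-based trylock only stores to the counter when the lock was actually free, so a failed attempt cannot make the mutex look contended. A minimal userspace sketch of that behaviour, using the GCC __atomic builtins rather than the kernel's atomic_cmpxchg(); the names count and trylock_cmpxchg are made up for illustration:

#include <stdio.h>

static int count = 1;	/* 1 == unlocked, matching the fast-path convention */

static int trylock_cmpxchg(int *cnt)
{
	int expected = 1;

	/* Succeeds, and stores 0, only if *cnt was still 1; a failed
	 * attempt leaves *cnt untouched, so no false contention. */
	return __atomic_compare_exchange_n(cnt, &expected, 0, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	printf("first try:  %d\n", trylock_cmpxchg(&count));	/* 1: acquired */
	printf("second try: %d\n", trylock_cmpxchg(&count));	/* 0: already held */
	return 0;
}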
arch/x86/kernel/acpi/boot.c
@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif
 
-#ifndef __HAVE_ARCH_CMPXCHG
-#warning ACPI uses CMPXCHG, i486 and later hardware
-#endif
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
arch/x86/kernel/entry_64.S
@@ -207,7 +207,6 @@ ENDPROC(native_usergs_sysret64)
 	 */
 	.macro XCPT_FRAME start=1 offset=0
 	INTR_FRAME \start, RIP+\offset-ORIG_RAX
-	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
 	.endm
 
 /*
@@ -287,21 +286,21 @@ ENDPROC(native_usergs_sysret64)
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
 	movq_cfi rdx, RDX+8
 	movq_cfi rcx, RCX+8
 	movq_cfi rax, RAX+8
-	movq_cfi r8, R8+8
-	movq_cfi r9, R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %r8, R8+8(%rsp)
+	movq %r9, R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	movl $1,%ebx
 	movl $MSR_GS_BASE,%ecx
 	rdmsr
@@ -1387,21 +1386,21 @@ ENTRY(error_entry)
 	CFI_ADJUST_CFA_OFFSET 15*8
 	/* oldrax contains error code */
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
-	movq_cfi rdx, RDX+8
-	movq_cfi rcx, RCX+8
-	movq_cfi rax, RAX+8
-	movq_cfi r8, R8+8
-	movq_cfi r9, R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
+	movq %rdx, RDX+8(%rsp)
+	movq %rcx, RCX+8(%rsp)
+	movq %rax, RAX+8(%rsp)
+	movq %r8, R8+8(%rsp)
+	movq %r9, R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	xorl %ebx,%ebx
 	testl $3,CS+8(%rsp)
 	je error_kernelspace
@@ -1419,6 +1418,7 @@ error_sti:
 	 * compat mode. Check for these here too.
 	 */
 error_kernelspace:
+	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
arch/x86/kernel/tsc.c
@@ -234,9 +234,6 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 	return ns;
 }
 
-/* XXX surely we already have this someplace in the kernel?! */
-#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
-
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
 	unsigned long long tsc_now, ns_now;
@@ -259,7 +256,9 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	 * time function is continuous; see the comment near struct
 	 * cyc2ns_data.
 	 */
-	data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+	data->cyc2ns_mul =
+		DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
+				  cpu_khz);
 	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
 	data->cyc2ns_offset = ns_now -
 		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
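For the tsc.c change above, the removed local DIV_ROUND() macro and the generic DIV_ROUND_CLOSEST() from <linux/kernel.h> compute the same round-to-nearest quotient for the unsigned operands used here. A standalone sketch; the sample values are illustrative stand-ins, not the real kernel constants:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND(n, d)         (((n) + ((d) / 2)) / (d))	/* the removed tsc.c macro */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))	/* simplified, unsigned-only form */

int main(void)
{
	unsigned long long num = 1000000ULL << 10;	/* stand-in for NSEC_PER_MSEC << scale */
	unsigned long khz = 2400000UL;			/* stand-in for a 2.4 GHz cpu_khz */

	/* Both forms round to the nearest integer for unsigned inputs. */
	assert(DIV_ROUND(num, khz) == DIV_ROUND_CLOSEST(num, khz));
	printf("%llu\n", DIV_ROUND(num, khz));
	return 0;
}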