Merge android-4.4.113 (ef588ef) into msm-4.4

* refs/heads/tmp-ef588ef
  Linux 4.4.113
  MIPS: AR7: ensure the port type's FCR value is used
  x86/retpoline: Optimize inline assembler for vmexit_fill_RSB
  x86/pti: Document fix wrong index
  kprobes/x86: Disable optimizing on the function jumps to indirect thunk
  kprobes/x86: Blacklist indirect thunk functions for kprobes
  retpoline: Introduce start/end markers of indirect thunk
  x86/mce: Make machine check speculation protected
  kbuild: modversions for EXPORT_SYMBOL() for asm
  x86/cpu, x86/pti: Do not enable PTI on AMD processors
  arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6
  dm btree: fix serious bug in btree_split_beneath()
  libata: apply MAX_SEC_1024 to all LITEON EP1 series devices
  can: peak: fix potential bug in packet fragmentation
  ARM: dts: kirkwood: fix pin-muxing of MPP7 on OpenBlocks A7
  phy: work around 'phys' references to usb-nop-xceiv devices
  tracing: Fix converting enum's from the map in trace_event_eval_update()
  Input: twl4030-vibra - fix sibling-node lookup
  Input: twl6040-vibra - fix child-node lookup
  Input: twl6040-vibra - fix DT node memory management
  Input: 88pm860x-ts - fix child-node lookup
  x86/apic/vector: Fix off by one in error path
  pipe: avoid round_pipe_size() nr_pages overflow on 32-bit
  module: Add retpoline tag to VERMAGIC
  x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
  sched/deadline: Zero out positive runtime after throttling constrained tasks
  scsi: hpsa: fix volume offline state
  af_key: fix buffer overread in parse_exthdrs()
  af_key: fix buffer overread in verify_address_len()
  ALSA: hda - Apply the existing quirk to iMac 14,1
  ALSA: hda - Apply headphone noise quirk for another Dell XPS 13 variant
  ALSA: pcm: Remove yet superfluous WARN_ON()
  futex: Prevent overflow by strengthen input validation
  scsi: sg: disable SET_FORCE_LOW_DMA
  x86/retpoline: Remove compile time warning
  x86/retpoline: Fill return stack buffer on vmexit
  x86/retpoline/irq32: Convert assembler indirect jumps
  x86/retpoline/checksum32: Convert assembler indirect jumps
  x86/retpoline/xen: Convert Xen hypercall indirect jumps
  x86/retpoline/hyperv: Convert assembler indirect jumps
  x86/retpoline/ftrace: Convert ftrace assembler indirect jumps
  x86/retpoline/entry: Convert entry assembler indirect jumps
  x86/retpoline/crypto: Convert crypto assembler indirect jumps
  x86/spectre: Add boot time option to select Spectre v2 mitigation
  x86/retpoline: Add initial retpoline support
  kconfig.h: use __is_defined() to check if MODULE is defined
  EXPORT_SYMBOL() for asm
  x86/asm: Make asm/alternative.h safe from assembly
  x86/kbuild: enable modversions for symbols exported from asm
  x86/asm: Use register variable to get stack pointer value
  x86/mm/32: Move setup_clear_cpu_cap(X86_FEATURE_PCID) earlier
  x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC
  x86/cpu/AMD: Make LFENCE a serializing instruction
  gcov: disable for COMPILE_TEST
  ANDROID: sdcardfs: Move default_normal to superblock
  blkdev: Refactoring block io latency histogram codes
  FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN
  FROMLIST: arm64: Move post_ttbr_update_workaround to C code
  FROMLIST: arm64: mm: Rename post_ttbr0_update_workaround
  sched: EAS: Initialize push_task as NULL to avoid direct reference on out_unlock path

Conflicts:
	arch/arm64/include/asm/efi.h
	arch/arm64/include/asm/mmu_context.h
	drivers/scsi/sg.c
	drivers/scsi/ufs/ufshcd.h

Change-Id: Ibfa06af8ef308077aad6995874d4b7b0a73e95f3
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit c43902eef7
Author: Srinivasarao P <spathi@codeaurora.org>
Date:   2018-01-24 18:25:18 +05:30

88 changed files with 1092 additions and 296 deletions

@@ -2468,6 +2468,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.
 
+	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+			(indirect branch prediction) vulnerability. System may
+			allow data leaks with this option, which is equivalent
+			to spectre_v2=off.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -3619,6 +3624,29 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
 			See Documentation/laptops/sonypi.txt
 
+	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable
+
+			Selecting 'on' will, and 'auto' may, choose a
+			mitigation method at run time according to the
+			CPU, the available microcode, the setting of the
+			CONFIG_RETPOLINE configuration option, and the
+			compiler with which the kernel was built.
+
+			Specific mitigations can also be selected manually:
+
+			retpoline	  - replace indirect branches
+			retpoline,generic - google's original retpoline
+			retpoline,amd     - AMD-specific minimal thunk
+
+			Not specifying this option is equivalent to
+			spectre_v2=auto.
+
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
 	spia_pedr=
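
For example, booting with spectre_v2=retpoline,generic forces the compiler-built retpoline regardless of CPU vendor, while omitting the option entirely behaves like spectre_v2=auto and lets the kernel pick based on CPU, microcode, and compiler support.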

@@ -78,7 +78,7 @@ this protection comes at a cost:
      non-PTI SYSCALL entry code, so requires mapping fewer
      things into the userspace page tables.  The downside is
      that stacks must be switched at entry time.
-  d. Global pages are disabled for all kernel structures not
+  c. Global pages are disabled for all kernel structures not
      mapped into both kernel and userspace page tables.  This
      feature of the MMU allows different processes to share TLB
      entries mapping the kernel.  Losing the feature means more

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 112
+SUBLEVEL = 113
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -53,7 +53,8 @@
    };

    pinctrl: pin-controller@10000 {
-       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+       pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+                    &pmx_gpio_header_gpo>;
        pinctrl-names = "default";

        pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
         * ground.
         */
        pmx_gpio_header: pmx-gpio-header {
-           marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+           marvell,pins = "mpp17", "mpp29", "mpp28",
                       "mpp35", "mpp34", "mpp40";
            marvell,function = "gpio";
        };

+       pmx_gpio_header_gpo: pxm-gpio-header-gpo {
+           marvell,pins = "mpp7";
+           marvell,function = "gpo";
+       };
+
        pmx_gpio_init: pmx-init {
            marvell,pins = "mpp38";
            marvell,function = "gpio";

@@ -410,17 +410,4 @@ alternative_endif
    mrs \rd, sp_el0
    .endm

-/*
- * Errata workaround post TTBR0_EL1 update.
- */
-   .macro  post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-   ic  iallu
-   dsb nsh
-   isb
-alternative_else_nop_endif
-#endif
-   .endm
-
 #endif /* __ASM_ASSEMBLER_H */

@@ -76,19 +76,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
    if (mm != current->active_mm) {
        /*
         * Update the current thread's saved ttbr0 since it is
-        * restored as part of a return from exception. Set
-        * the hardware TTBR0_EL1 using cpu_switch_mm()
-        * directly to enable potential errata workarounds.
+        * restored as part of a return from exception. Enable
+        * access to the valid TTBR0_EL1 and invoke the errata
+        * workaround directly since there is no return from
+        * exception when invoking the EFI run-time services.
         */
        update_saved_ttbr0(current, mm);
-       cpu_switch_mm(mm->pgd, mm);
+       uaccess_ttbr0_enable();
+       post_ttbr_update_workaround();
    } else {
        /*
         * Defer the switch to the current thread's TTBR0_EL1
         * until uaccess_enable(). Restore the current
         * thread's saved ttbr0 corresponding to its active_mm
         */
-       cpu_set_reserved_ttbr0();
+       uaccess_ttbr0_disable();
        update_saved_ttbr0(current, current->active_mm);
    }
 }

@@ -186,7 +186,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
    else
        ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

-   task_thread_info(tsk)->ttbr0 = ttbr;
+   WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -240,4 +240,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define deactivate_mm(tsk,mm)  do { } while (0)
 #define activate_mm(prev,next) switch_mm(prev, next, current)

+void post_ttbr_update_workaround(void);
+
 #endif

@@ -143,16 +143,18 @@ static inline void set_fs(mm_segment_t fs)
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void __uaccess_ttbr0_disable(void)
 {
-   unsigned long ttbr;
+   unsigned long flags, ttbr;

+   local_irq_save(flags);
    ttbr = read_sysreg(ttbr1_el1);
+   ttbr &= ~TTBR_ASID_MASK;
    /* reserved_ttbr0 placed at the end of swapper_pg_dir */
    write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
    isb();
    /* Set reserved ASID */
-   ttbr &= ~TTBR_ASID_MASK;
    write_sysreg(ttbr, ttbr1_el1);
    isb();
+   local_irq_restore(flags);
 }

 static inline void __uaccess_ttbr0_enable(void)
@@ -165,10 +167,11 @@ static inline void __uaccess_ttbr0_enable(void)
     * roll-over and an update of 'ttbr0'.
     */
    local_irq_save(flags);
-   ttbr0 = current_thread_info()->ttbr0;
+   ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

    /* Restore active ASID */
    ttbr1 = read_sysreg(ttbr1_el1);
+   ttbr1 &= ~TTBR_ASID_MASK;   /* safety measure */
    ttbr1 |= ttbr0 & TTBR_ASID_MASK;
    write_sysreg(ttbr1, ttbr1_el1);
    isb();
@@ -453,11 +456,11 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
    .macro  __uaccess_ttbr0_disable, tmp1
    mrs \tmp1, ttbr1_el1            // swapper_pg_dir
+   bic \tmp1, \tmp1, #TTBR_ASID_MASK
    add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
    msr ttbr0_el1, \tmp1            // set reserved TTBR0_EL1
    isb
    sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE
-   bic \tmp1, \tmp1, #TTBR_ASID_MASK
    msr ttbr1_el1, \tmp1            // set reserved ASID
    isb
    .endm
@@ -474,9 +477,11 @@ extern __must_check long strnlen_user(const char __user *str, long n);
    isb
    .endm

-   .macro  uaccess_ttbr0_disable, tmp1
+   .macro  uaccess_ttbr0_disable, tmp1, tmp2
 alternative_if_not ARM64_HAS_PAN
+   save_and_disable_irq \tmp2      // avoid preemption
    __uaccess_ttbr0_disable \tmp1
+   restore_irq \tmp2
 alternative_else_nop_endif
    .endm
@@ -488,7 +493,7 @@ alternative_if_not ARM64_HAS_PAN
 alternative_else_nop_endif
    .endm
 #else
-   .macro  uaccess_ttbr0_disable, tmp1
+   .macro  uaccess_ttbr0_disable, tmp1, tmp2
    .endm

    .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
@@ -498,8 +503,8 @@ alternative_else_nop_endif
 /*
  * These macros are no-ops when UAO is present.
  */
-   .macro  uaccess_disable_not_uao, tmp1
-   uaccess_ttbr0_disable \tmp1
+   .macro  uaccess_disable_not_uao, tmp1, tmp2
+   uaccess_ttbr0_disable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
    SET_PSTATE_PAN(1)
 alternative_else_nop_endif

@@ -164,7 +164,7 @@ alternative_if ARM64_HAS_PAN
 alternative_else_nop_endif

    .if \el != 0
-   mrs x21, ttbr1_el1
+   mrs x21, ttbr0_el1
    tst x21, #TTBR_ASID_MASK        // Check for the reserved ASID
    orr x23, x23, #PSR_PAN_BIT      // Set the emulated PAN in the saved SPSR
    b.eq    1f              // TTBR0 access already disabled
@@ -241,7 +241,7 @@ alternative_else_nop_endif
     * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
     * corruption).
     */
-   post_ttbr0_update_workaround
+   bl  post_ttbr_update_workaround
    .endif
 1:
    .if \el != 0

@@ -43,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
    ret = kvm_psci_call(vcpu);
    if (ret < 0) {
-       kvm_inject_undefined(vcpu);
+       vcpu_set_reg(vcpu, 0, ~0UL);
        return 1;
    }
@@ -52,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-   kvm_inject_undefined(vcpu);
+   vcpu_set_reg(vcpu, 0, ~0UL);
    return 1;
 }

@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
    b.mi    5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5: mov x0, #0
-   uaccess_disable_not_uao x2
+   uaccess_disable_not_uao x2, x3
    ret
 ENDPROC(__clear_user)

@@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
 #include "copy_template.S"
-   uaccess_disable_not_uao x3
+   uaccess_disable_not_uao x3, x4
    mov x0, #0              // Nothing to copy
    ret
 ENDPROC(__arch_copy_from_user)

@@ -68,7 +68,7 @@ ENTRY(__copy_in_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
 #include "copy_template.S"
-   uaccess_disable_not_uao x3
+   uaccess_disable_not_uao x3, x4
    mov x0, #0
    ret
 ENDPROC(__copy_in_user)

@@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user)
    uaccess_enable_not_uao x3, x4, x5
    add end, x0, x2
 #include "copy_template.S"
-   uaccess_disable_not_uao x3
+   uaccess_disable_not_uao x3, x4
    mov x0, #0
    ret
 ENDPROC(__arch_copy_to_user)

@@ -145,7 +145,7 @@ USER(9f, ic ivau, x4 )      // invalidate I line PoU
    isb
    mov x0, #0
 1:
-   uaccess_ttbr0_disable x1
+   uaccess_ttbr0_disable x1, x2
    ret
 9:
    mov x0, #-EFAULT

@@ -199,6 +199,15 @@ switch_mm_fastpath:
    cpu_switch_mm(mm->pgd, mm);
 }

+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+   asm(ALTERNATIVE("nop; nop; nop",
+           "ic iallu; dsb nsh; isb",
+           ARM64_WORKAROUND_CAVIUM_27456,
+           CONFIG_CAVIUM_ERRATUM_27456));
+}
+
 static int asids_init(void)
 {
    int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1), 4);

@@ -175,13 +175,15 @@ ENDPROC(cpu_do_resume)
 ENTRY(cpu_do_switch_mm)
    mrs x2, ttbr1_el1
    mmid    x1, x1              // get mm->context.id
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+   bfi x0, x1, #48, #16        // set the ASID field in TTBR0
+#endif
    bfi x2, x1, #48, #16        // set the ASID
    msr ttbr1_el1, x2           // in TTBR1 (since TCR.A1 is set)
    isb
    msr ttbr0_el1, x0           // now update TTBR0
    isb
-   post_ttbr0_update_workaround
-   ret
+   b   post_ttbr_update_workaround // Back to C code...
 ENDPROC(cpu_do_switch_mm)

 .pushsection ".idmap.text", "ax"

@@ -104,6 +104,6 @@ ENTRY(privcmd_call)
    /*
     * Disable userspace access from kernel once the hyp call completed.
     */
-   uaccess_ttbr0_disable x6
+   uaccess_ttbr0_disable x6, x7
    ret
 ENDPROC(privcmd_call);

@@ -576,7 +576,7 @@ static int __init ar7_register_uarts(void)
    uart_port.type      = PORT_AR7;
    uart_port.uartclk   = clk_get_rate(bus_clk) / 2;
    uart_port.iotype    = UPIO_MEM32;
-   uart_port.flags     = UPF_FIXED_TYPE;
+   uart_port.flags     = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
    uart_port.regshift  = 2;
    uart_port.line      = 0;

@@ -396,6 +396,19 @@ config GOLDFISH
    def_bool y
    depends on X86_GOLDFISH

+config RETPOLINE
+   bool "Avoid speculative indirect branches in kernel"
+   default y
+   ---help---
+     Compile kernel with the retpoline compiler options to guard against
+     kernel-to-user data leaks by avoiding speculative indirect
+     branches. Requires a compiler with -mindirect-branch=thunk-extern
+     support for full protection. The kernel may run slower.
+
+     Without compiler support, at least indirect branches in assembler
+     code are eliminated. Since this includes the syscall entry path,
+     it is not entirely pointless.
+
 if X86_32
 config X86_EXTENDED_PLATFORM
    bool "Support for extended (non-PC) x86 platforms"

@@ -210,6 +210,14 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)

+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+    ifneq ($(RETPOLINE_CFLAGS),)
+        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+    endif
+endif
+
 archscripts: scripts_basic
    $(Q)$(MAKE) $(build)=arch/x86/tools relocs
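
Note the $(call cc-option,...) probe above: on compilers without -mindirect-branch=thunk-extern support, RETPOLINE_CFLAGS stays empty and -DRETPOLINE is never defined. The C-level thunk calls added in nospec-branch.h further down key off that RETPOLINE define (not CONFIG_RETPOLINE), so such builds quietly fall back to the minimal assembler-only protection described in the Kconfig help above.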

@@ -31,6 +31,7 @@

 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/nospec-branch.h>

 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2714,7 +2715,7 @@ ENTRY(aesni_xts_crypt8)
    pxor INC, STATE4
    movdqu IV, 0x30(OUTP)

-   call *%r11
+   CALL_NOSPEC %r11

    movdqu 0x00(OUTP), INC
    pxor INC, STATE1
@@ -2759,7 +2760,7 @@ ENTRY(aesni_xts_crypt8)
    _aesni_gf128mul_x_ble()
    movups IV, (IVP)

-   call *%r11
+   CALL_NOSPEC %r11

    movdqu 0x40(OUTP), INC
    pxor INC, STATE1

@@ -16,6 +16,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>

 #define CAMELLIA_TABLE_BYTE_LEN 272

@@ -1210,7 +1211,7 @@ camellia_xts_crypt_16way:
    vpxor 14 * 16(%rax), %xmm15, %xmm14;
    vpxor 15 * 16(%rax), %xmm15, %xmm15;

-   call *%r9;
+   CALL_NOSPEC %r9;

    addq $(16 * 16), %rsp;

@@ -11,6 +11,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>

 #define CAMELLIA_TABLE_BYTE_LEN 272

@@ -1323,7 +1324,7 @@ camellia_xts_crypt_32way:
    vpxor 14 * 32(%rax), %ymm15, %ymm14;
    vpxor 15 * 32(%rax), %ymm15, %ymm15;

-   call *%r9;
+   CALL_NOSPEC %r9;

    addq $(16 * 32), %rsp;

@@ -45,6 +45,7 @@

 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>

 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction

@@ -172,7 +173,7 @@ continue_block:
    movzxw  (bufp, %rax, 2), len
    offset=crc_array-jump_table
    lea     offset(bufp, len, 1), bufp
-   jmp     *bufp
+   JMP_NOSPEC bufp

 ################################################################
 ## 2a) PROCESS FULL BLOCKS:

@@ -44,6 +44,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>

    .section .entry.text, "ax"

@@ -226,7 +227,8 @@ ENTRY(ret_from_kernel_thread)
    pushl   $0x0202             # Reset kernel eflags
    popfl
    movl    PT_EBP(%esp), %eax
-   call    *PT_EBX(%esp)
+   movl    PT_EBX(%esp), %edx
+   CALL_NOSPEC %edx
    movl    $0, PT_EAX(%esp)

    /*
@@ -861,7 +863,8 @@ trace:
    movl    0x4(%ebp), %edx
    subl    $MCOUNT_INSN_SIZE, %eax

-   call    *ftrace_trace_function
+   movl    ftrace_trace_function, %ecx
+   CALL_NOSPEC %ecx

    popl    %edx
    popl    %ecx
@@ -896,7 +899,7 @@ return_to_handler:
    movl    %eax, %ecx
    popl    %edx
    popl    %eax
-   jmp     *%ecx
+   JMP_NOSPEC %ecx
 #endif

 #ifdef CONFIG_TRACING
@@ -938,7 +941,7 @@ error_code:
    movl    %ecx, %es
    TRACE_IRQS_OFF
    movl    %esp, %eax          # pt_regs pointer
-   call    *%edi
+   CALL_NOSPEC %edi
    jmp     ret_from_exception
 END(page_fault)

@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/kaiser.h>
+#include <asm/nospec-branch.h>
 #include <linux/err.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -184,7 +185,13 @@ entry_SYSCALL_64_fastpath:
 #endif
    ja  1f              /* return -ENOSYS (already in pt_regs->ax) */
    movq    %r10, %rcx
+#ifdef CONFIG_RETPOLINE
+   movq    sys_call_table(, %rax, 8), %rax
+   call    __x86_indirect_thunk_rax
+#else
    call    *sys_call_table(, %rax, 8)
+#endif
    movq    %rax, RAX(%rsp)
 1:
 /*
@@ -276,7 +283,12 @@ tracesys_phase2:
 #endif
    ja  1f              /* return -ENOSYS (already in pt_regs->ax) */
    movq    %r10, %rcx          /* fixup for C */
+#ifdef CONFIG_RETPOLINE
+   movq    sys_call_table(, %rax, 8), %rax
+   call    __x86_indirect_thunk_rax
+#else
    call    *sys_call_table(, %rax, 8)
+#endif
    movq    %rax, RAX(%rsp)
 1:
    /* Use IRET because user could have changed pt_regs->foo */
@@ -491,7 +503,7 @@ ENTRY(ret_from_fork)
     * nb: we depend on RESTORE_EXTRA_REGS above
     */
    movq    %rbp, %rdi
-   call    *%rbx
+   CALL_NOSPEC %rbx
    movl    $0, RAX(%rsp)
    RESTORE_EXTRA_REGS
    jmp int_ret_from_sys_call
@@ -1025,7 +1037,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
 #endif

 #ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check do_mce has_error_code=0 paranoid=1
 #endif

 /*

@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_ALTERNATIVE_H
 #define _ASM_X86_ALTERNATIVE_H

+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -271,4 +273,6 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);

+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_X86_ALTERNATIVE_H */

@@ -0,0 +1,41 @@
+#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/page.h>
+#include <asm/checksum.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/special_insns.h>
+#include <asm/preempt.h>
+#include <asm/asm.h>
+
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+#endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+INDIRECT_THUNK(sp)
+#endif /* CONFIG_RETPOLINE */

@@ -106,4 +106,15 @@
 /* For C file, we already have NOKPROBE_SYMBOL macro */
 #endif

+#ifndef __ASSEMBLY__
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif
+
 #endif /* _ASM_X86_ASM_H */
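
A minimal sketch of what the new register variable buys (the helper below is hypothetical and not part of this commit): the stack pointer can now be read like any other variable, which is how irq_32.c's current_stack() uses it after this change and why the old current_stack_pointer() function can be deleted from thread_info.h.

    /* Illustrative only: stack_base() is a hypothetical helper. */
    static inline void *stack_base(void)
    {
        /* current_stack_pointer is bound to %esp/%rsp in <asm/asm.h> */
        return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
    }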

@@ -200,6 +200,8 @@
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */

+#define X86_FEATURE_RETPOLINE  ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
 /* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
 #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */

@@ -330,6 +330,9 @@
 #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT    20
 #define MSR_FAM10H_NODE_ID     0xc001100c
+#define MSR_F10H_DECFG         0xc0011029
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT    1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE        BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)

 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1        0xc001001a

@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS        32  /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS     16  /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)  \
+   mov $(nr/2), reg;           \
+771:                       \
+   call    772f;               \
+773:   /* speculation trap */          \
+   pause;                  \
+   lfence;                 \
+   jmp 773b;               \
+772:                       \
+   call    774f;               \
+775:   /* speculation trap */          \
+   pause;                  \
+   lfence;                 \
+   jmp 775b;               \
+774:                       \
+   dec reg;                \
+   jnz 771b;               \
+   add $(BITS_PER_LONG/8) * nr, sp;
+
+#ifdef __ASSEMBLY__
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+   call    .Ldo_rop_\@
+.Lspec_trap_\@:
+   pause
+   lfence
+   jmp .Lspec_trap_\@
+.Ldo_rop_\@:
+   mov \reg, (%_ASM_SP)
+   ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+   jmp .Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+   RETPOLINE_JMP   \reg
+.Ldo_call_\@:
+   call    .Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+   ALTERNATIVE_2 __stringify(jmp *\reg),               \
+       __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+       __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+   jmp *\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+   ALTERNATIVE_2 __stringify(call *\reg),              \
+       __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+       __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+   call    *\reg
+#endif
+.endm
+
+/*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above, manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+#ifdef CONFIG_RETPOLINE
+   ALTERNATIVE "jmp .Lskip_rsb_\@",                \
+       __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+       \ftr
+.Lskip_rsb_\@:
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier which is only in newer GCC,
+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC                       \
+   ALTERNATIVE(                        \
+   "call *%[thunk_target]\n",              \
+   "call __x86_indirect_thunk_%V[thunk_target]\n",     \
+   X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",    \
+   "       jmp    904f;\n"                 \
+   "       .align 16\n"                    \
+   "901:   call   903f;\n"                 \
+   "902:   pause;\n"                   \
+   "       lfence;\n"                  \
+   "       jmp    902b;\n"                 \
+   "       .align 16\n"                    \
+   "903:   addl   $4, %%esp;\n"                \
+   "       pushl  %[thunk_target];\n"          \
+   "       ret;\n"                     \
+   "       .align 16\n"                    \
+   "904:   call   901b;\n",                \
+   X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline for C / inline asm */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+/* The Spectre V2 mitigation variants */
+enum spectre_v2_mitigation {
+   SPECTRE_V2_NONE,
+   SPECTRE_V2_RETPOLINE_MINIMAL,
+   SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
+   SPECTRE_V2_RETPOLINE_GENERIC,
+   SPECTRE_V2_RETPOLINE_AMD,
+   SPECTRE_V2_IBRS,
+};
+
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+#ifdef CONFIG_RETPOLINE
+   unsigned long loops;
+
+   asm volatile (ALTERNATIVE("jmp 910f",
+                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                 X86_FEATURE_RETPOLINE)
+             "910:"
+             : "=r" (loops), ASM_CALL_CONSTRAINT
+             : : "memory" );
+#endif
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
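
A minimal sketch of how C code is meant to use these macros, mirroring the Hyper-V and Xen call sites converted later in this commit. The function pointer fn is hypothetical, the sketch is x86-64 only, and the clobber list assumes a void fn(void) that follows the normal C ABI:

    /* Illustrative only -- call_guarded() is not part of this commit. */
    static void call_guarded(void (*fn)(void))
    {
        /*
         * CALL_NOSPEC emits either a plain indirect call or a call
         * through __x86_indirect_thunk_<reg>, chosen at patch time by
         * alternatives; THUNK_TARGET binds fn to [thunk_target].
         */
        asm volatile(CALL_NOSPEC
                 : ASM_CALL_CONSTRAINT
                 : THUNK_TARGET(fn)
                 : "rax", "rcx", "rdx", "rsi", "rdi",
                   "r8", "r9", "r10", "r11", "memory", "cc");
    }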

@@ -166,17 +166,6 @@ static inline struct thread_info *current_thread_info(void)
    return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
 }

-static inline unsigned long current_stack_pointer(void)
-{
-   unsigned long sp;
-#ifdef CONFIG_X86_64
-   asm("mov %%rsp,%0" : "=g" (sp));
-#else
-   asm("mov %%esp,%0" : "=g" (sp));
-#endif
-   return sp;
-}
-
 /*
  * Walks up the stack frames to make sure that the specified object is
  * entirely contained by a single stack frame.

@@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);

 static inline int get_si_code(unsigned long condition)
 {

@@ -44,6 +44,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/smap.h>
+#include <asm/nospec-branch.h>

 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -215,9 +216,9 @@ privcmd_call(unsigned call,
    __HYPERCALL_5ARG(a1, a2, a3, a4, a5);

    stac();
-   asm volatile("call *%[call]"
+   asm volatile(CALL_NOSPEC
             : __HYPERCALL_5PARAM
-            : [call] "a" (&hypercall_page[call])
+            : [thunk_target] "a" (&hypercall_page[call])
             : __HYPERCALL_CLOBBER5);
    clac();

@@ -359,14 +359,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
        irq_data->chip_data = data;
        irq_data->hwirq = virq + i;
        err = assign_irq_vector_policy(virq + i, node, data, info);
-       if (err)
+       if (err) {
+           irq_data->chip_data = NULL;
+           free_apic_chip_data(data);
            goto error;
+       }
    }

    return 0;

 error:
-   x86_vector_free_irqs(domain, virq, i + 1);
+   x86_vector_free_irqs(domain, virq, i);
    return err;
 }

@@ -746,8 +746,32 @@ static void init_amd(struct cpuinfo_x86 *c)
        set_cpu_cap(c, X86_FEATURE_K8);

    if (cpu_has_xmm2) {
-       /* MFENCE stops RDTSC speculation */
-       set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+       unsigned long long val;
+       int ret;
+
+       /*
+        * A serializing LFENCE has less overhead than MFENCE, so
+        * use it for execution serialization. On families which
+        * don't have that MSR, LFENCE is already serializing.
+        * msr_set_bit() uses the safe accessors, too, even if the MSR
+        * is not present.
+        */
+       msr_set_bit(MSR_F10H_DECFG,
+               MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+       /*
+        * Verify that the MSR write was successful (could be running
+        * under a hypervisor) and only then assume that LFENCE is
+        * serializing.
+        */
+       ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+       if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+           /* A serializing LFENCE stops RDTSC speculation */
+           set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+       } else {
+           /* MFENCE stops RDTSC speculation */
+           set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+       }
    }

    /*
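
For context on how these feature bits are consumed elsewhere in the tree (not part of this diff, quoted approximately from the 4.4 sources): the barrier choice is patched into rdtsc_ordered() via alternatives.

    static __always_inline unsigned long long rdtsc_ordered(void)
    {
        /*
         * Patched at boot: no barrier on CPUs where RDTSC is already
         * ordered, otherwise MFENCE or the now cheaper LFENCE, per the
         * feature bits set in init_amd() above.
         */
        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
                  "lfence", X86_FEATURE_LFENCE_RDTSC);
        return rdtsc();
    }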

@@ -10,6 +10,9 @@
 #include <linux/init.h>
 #include <linux/utsname.h>
 #include <linux/cpu.h>
+
+#include <asm/nospec-branch.h>
+#include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
@@ -20,16 +23,10 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>

+static void __init spectre_v2_select_mitigation(void);
+
 void __init check_bugs(void)
 {
-#ifdef CONFIG_X86_32
-   /*
-    * Regardless of whether PCID is enumerated, the SDM says
-    * that it can't be enabled in 32-bit mode.
-    */
-   setup_clear_cpu_cap(X86_FEATURE_PCID);
-#endif
    identify_boot_cpu();

    if (!IS_ENABLED(CONFIG_SMP)) {
@@ -37,6 +34,9 @@ void __init check_bugs(void)
        print_cpu_info(&boot_cpu_data);
    }

+   /* Select the proper spectre mitigation before patching alternatives */
+   spectre_v2_select_mitigation();
+
 #ifdef CONFIG_X86_32
    /*
     * Check whether we are able to run this kernel safely on SMP.
@@ -69,6 +69,153 @@ void __init check_bugs(void)
 #endif
 }

+/* The kernel command line selection */
+enum spectre_v2_mitigation_cmd {
+   SPECTRE_V2_CMD_NONE,
+   SPECTRE_V2_CMD_AUTO,
+   SPECTRE_V2_CMD_FORCE,
+   SPECTRE_V2_CMD_RETPOLINE,
+   SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+   SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+static const char *spectre_v2_strings[] = {
+   [SPECTRE_V2_NONE]           = "Vulnerable",
+   [SPECTRE_V2_RETPOLINE_MINIMAL]      = "Vulnerable: Minimal generic ASM retpoline",
+   [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]  = "Vulnerable: Minimal AMD ASM retpoline",
+   [SPECTRE_V2_RETPOLINE_GENERIC]      = "Mitigation: Full generic retpoline",
+   [SPECTRE_V2_RETPOLINE_AMD]      = "Mitigation: Full AMD retpoline",
+};
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+
+static void __init spec2_print_if_insecure(const char *reason)
+{
+   if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+       pr_info("%s\n", reason);
+}
+
+static void __init spec2_print_if_secure(const char *reason)
+{
+   if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+       pr_info("%s\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+   return __is_defined(RETPOLINE);
+}
+
+static inline bool match_option(const char *arg, int arglen, const char *opt)
+{
+   int len = strlen(opt);
+
+   return len == arglen && !strncmp(arg, opt, len);
+}
+
+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
+{
+   char arg[20];
+   int ret;
+
+   ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
+                 sizeof(arg));
+   if (ret > 0) {
+       if (match_option(arg, ret, "off")) {
+           goto disable;
+       } else if (match_option(arg, ret, "on")) {
+           spec2_print_if_secure("force enabled on command line.");
+           return SPECTRE_V2_CMD_FORCE;
+       } else if (match_option(arg, ret, "retpoline")) {
+           spec2_print_if_insecure("retpoline selected on command line.");
+           return SPECTRE_V2_CMD_RETPOLINE;
+       } else if (match_option(arg, ret, "retpoline,amd")) {
+           if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+               pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
+               return SPECTRE_V2_CMD_AUTO;
+           }
+           spec2_print_if_insecure("AMD retpoline selected on command line.");
+           return SPECTRE_V2_CMD_RETPOLINE_AMD;
+       } else if (match_option(arg, ret, "retpoline,generic")) {
+           spec2_print_if_insecure("generic retpoline selected on command line.");
+           return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
+       } else if (match_option(arg, ret, "auto")) {
+           return SPECTRE_V2_CMD_AUTO;
+       }
+   }
+
+   if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+       return SPECTRE_V2_CMD_AUTO;
+disable:
+   spec2_print_if_insecure("disabled on command line.");
+   return SPECTRE_V2_CMD_NONE;
+}
+
+static void __init spectre_v2_select_mitigation(void)
+{
+   enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+   enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
+
+   /*
+    * If the CPU is not affected and the command line mode is NONE or AUTO
+    * then nothing to do.
+    */
+   if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
+       (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
+       return;
+
+   switch (cmd) {
+   case SPECTRE_V2_CMD_NONE:
+       return;
+
+   case SPECTRE_V2_CMD_FORCE:
+       /* FALLTRHU */
+   case SPECTRE_V2_CMD_AUTO:
+       goto retpoline_auto;
+
+   case SPECTRE_V2_CMD_RETPOLINE_AMD:
+       if (IS_ENABLED(CONFIG_RETPOLINE))
+           goto retpoline_amd;
+       break;
+   case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
+       if (IS_ENABLED(CONFIG_RETPOLINE))
+           goto retpoline_generic;
+       break;
+   case SPECTRE_V2_CMD_RETPOLINE:
+       if (IS_ENABLED(CONFIG_RETPOLINE))
+           goto retpoline_auto;
+       break;
+   }
+   pr_err("kernel not compiled with retpoline; no mitigation available!");
+   return;
+
+retpoline_auto:
+   if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+   retpoline_amd:
+       if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
+           pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+           goto retpoline_generic;
+       }
+       mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
+                    SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+       setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
+       setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+   } else {
+   retpoline_generic:
+       mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
+                    SPECTRE_V2_RETPOLINE_MINIMAL;
+       setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+   }
+
+   spectre_v2_enabled = mode;
+   pr_info("%s\n", spectre_v2_strings[mode]);
+}
+
+#undef pr_fmt
+
 #ifdef CONFIG_SYSFS
 ssize_t cpu_show_meltdown(struct device *dev,
              struct device_attribute *attr, char *buf)
@@ -93,6 +240,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
 {
    if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
        return sprintf(buf, "Not affected\n");
-   return sprintf(buf, "Vulnerable\n");
+
+   return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
 }
 #endif
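
Userspace-visible effect: reading /sys/devices/system/cpu/vulnerabilities/spectre_v2 now reports the selected mode string (for example "Mitigation: Full generic retpoline") instead of a hard-coded "Vulnerable".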

@@ -831,13 +831,21 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
    setup_force_cpu_cap(X86_FEATURE_ALWAYS);

-   /* Assume for now that ALL x86 CPUs are insecure */
-   setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+   if (c->x86_vendor != X86_VENDOR_AMD)
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

    setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
    setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

    fpu__init_system(c);
+
+#ifdef CONFIG_X86_32
+   /*
+    * Regardless of whether PCID is enumerated, the SDM says
+    * that it can't be enabled in 32-bit mode.
+    */
+   setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
 }

 void __init early_cpu_init(void)

@@ -1672,6 +1672,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
                        unexpected_machine_check;

+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+   machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:

@@ -20,6 +20,7 @@
 #include <linux/mm.h>

 #include <asm/apic.h>
+#include <asm/nospec-branch.h>

 #ifdef CONFIG_DEBUG_STACKOVERFLOW

@@ -55,17 +56,17 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 static void call_on_stack(void *func, void *stack)
 {
    asm volatile("xchgl %%ebx,%%esp \n"
-            "call  *%%edi      \n"
+            CALL_NOSPEC
             "movl  %%ebx,%%esp \n"
             : "=b" (stack)
             : "0" (stack),
-              "D"(func)
+              [thunk_target] "D"(func)
             : "memory", "cc", "edx", "ecx", "eax");
 }

 static inline void *current_stack(void)
 {
-   return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+   return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
 }

@@ -89,17 +90,17 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
    /* Save the next esp at the bottom of the stack */
    prev_esp = (u32 *)irqstk;
-   *prev_esp = current_stack_pointer();
+   *prev_esp = current_stack_pointer;

    if (unlikely(overflow))
        call_on_stack(print_stack_overflow, isp);

    asm volatile("xchgl %%ebx,%%esp \n"
-            "call  *%%edi      \n"
+            CALL_NOSPEC
             "movl  %%ebx,%%esp \n"
             : "=a" (arg1), "=b" (isp)
             : "0" (desc), "1" (isp),
-              "D" (desc->handle_irq)
+              [thunk_target] "D" (desc->handle_irq)
             : "memory", "cc", "ecx");
    return 1;
 }
@@ -142,7 +143,7 @@ void do_softirq_own_stack(void)
    /* Push the previous esp onto the stack */
    prev_esp = (u32 *)irqstk;
-   *prev_esp = current_stack_pointer();
+   *prev_esp = current_stack_pointer;

    call_on_stack(__do_softirq, isp);
 }

@@ -36,6 +36,7 @@
 #include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/debugreg.h>
+#include <asm/nospec-branch.h>

 #include "common.h"

@@ -191,7 +192,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 }

 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
    return ((insn->opcode.bytes[0] == 0xff &&
        (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -225,6 +226,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
    return (start <= target && target <= start + len);
 }

+static int insn_is_indirect_jump(struct insn *insn)
+{
+   int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+   /*
+    * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+    * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+    * older gcc may use indirect jump. So we add this check instead of
+    * replace indirect-jump check.
+    */
+   if (!ret)
+       ret = insn_jump_into_range(insn,
+               (unsigned long)__indirect_thunk_start,
+               (unsigned long)__indirect_thunk_end -
+               (unsigned long)__indirect_thunk_start);
+#endif
+   return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {

@@ -7,7 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
-
+#include <asm/nospec-branch.h>

    .code64
    .section .entry.text, "ax"

@@ -285,8 +285,9 @@ trace:
     * ip and parent ip are used and the list function is called when
     * function tracing is enabled.
     */
-   call   *ftrace_trace_function
+   movq ftrace_trace_function, %r8
+   CALL_NOSPEC %r8
    restore_mcount_regs

    jmp fgraph_trace
@@ -329,5 +330,5 @@ GLOBAL(return_to_handler)
    movq 8(%rsp), %rdx
    movq (%rsp), %rax
    addq $24, %rsp
-   jmp *%rdi
+   JMP_NOSPEC %rdi
 #endif

@@ -166,7 +166,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
     * from double_fault.
     */
    BUG_ON((unsigned long)(current_top_of_stack() -
-                  current_stack_pointer()) >= THREAD_SIZE);
+                  current_stack_pointer) >= THREAD_SIZE);

    preempt_enable_no_resched();
 }

@@ -104,6 +104,13 @@ SECTIONS
        SOFTIRQENTRY_TEXT
        *(.fixup)
        *(.gnu.warning)
+
+#ifdef CONFIG_RETPOLINE
+       __indirect_thunk_start = .;
+       *(.text.__x86.indirect_thunk)
+       __indirect_thunk_end = .;
+#endif
+
        /* End of text section */
        _etext = .;
    } :text = 0x9090

@@ -37,6 +37,7 @@
 #include <asm/desc.h>
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
+#include <asm/nospec-branch.h>
 #include <asm/virtext.h>
 #include "trace.h"

@@ -3904,6 +3905,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
        );

+   /* Eliminate branch target predictions from guest mode */
+   vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
    wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else

@@ -47,6 +47,7 @@
 #include <asm/kexec.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>

 #include "trace.h"
 #include "pmu.h"

@@ -8701,6 +8702,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
        );

+   /* Eliminate branch target predictions from guest mode */
+   vmexit_fill_RSB();
+
    /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
    if (debugctlmsr)
        update_debugctlmsr(debugctlmsr);

@@ -24,6 +24,7 @@ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o

 obj-y += msr.o msr-reg.o msr-reg-export.o hash.o hweight.o

@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/nospec-branch.h>

 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
@@ -155,7 +156,7 @@ ENTRY(csum_partial)
    negl %ebx
    lea 45f(%ebx,%ebx,2), %ebx
    testl %esi, %esi
-   jmp *%ebx
+   JMP_NOSPEC %ebx

    # Handle 2-byte-aligned regions
 20:    addw (%esi), %ax
@@ -437,7 +438,7 @@ ENTRY(csum_partial_copy_generic)
    andl $-32,%edx
    lea 3f(%ebx,%ebx), %ebx
    testl %esi, %esi
-   jmp *%ebx
+   JMP_NOSPEC %ebx
 1: addl $64,%esi
    addl $64,%edi
    SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)

arch/x86/lib/retpoline.S (new file, 49 lines)

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
+#include <asm-generic/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+   .section .text.__x86.indirect_thunk
+
+ENTRY(__x86_indirect_thunk_\reg)
+   CFI_STARTPROC
+   JMP_NOSPEC %\reg
+   CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+GENERATE_THUNK(_ASM_SP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif

@@ -4069,76 +4069,43 @@ int __init blk_dev_init(void)
  * TODO : If necessary, we can make the histograms per-cpu and aggregate
  * them when printing them out.
  */
-void
-blk_zero_latency_hist(struct io_latency_state *s)
-{
-   memset(s->latency_y_axis_read, 0,
-          sizeof(s->latency_y_axis_read));
-   memset(s->latency_y_axis_write, 0,
-          sizeof(s->latency_y_axis_write));
-   s->latency_reads_elems = 0;
-   s->latency_writes_elems = 0;
-}
-EXPORT_SYMBOL(blk_zero_latency_hist);
-
 ssize_t
-blk_latency_hist_show(struct io_latency_state *s, char *buf)
+blk_latency_hist_show(char* name, struct io_latency_state *s, char *buf,
+       int buf_size)
 {
    int i;
    int bytes_written = 0;
    u_int64_t num_elem, elem;
    int pct;
+   u_int64_t average;

-   num_elem = s->latency_reads_elems;
-   if (num_elem > 0) {
-       bytes_written += scnprintf(buf + bytes_written,
-              PAGE_SIZE - bytes_written,
-              "IO svc_time Read Latency Histogram (n = %llu):\n",
-              num_elem);
-       for (i = 0;
-            i < ARRAY_SIZE(latency_x_axis_us);
-            i++) {
-           elem = s->latency_y_axis_read[i];
-           pct = div64_u64(elem * 100, num_elem);
-           bytes_written += scnprintf(buf + bytes_written,
-                          PAGE_SIZE - bytes_written,
-                          "\t< %5lluus%15llu%15d%%\n",
-                          latency_x_axis_us[i],
-                          elem, pct);
-       }
-       /* Last element in y-axis table is overflow */
-       elem = s->latency_y_axis_read[i];
-       pct = div64_u64(elem * 100, num_elem);
-       bytes_written += scnprintf(buf + bytes_written,
-                      PAGE_SIZE - bytes_written,
-                      "\t> %5dms%15llu%15d%%\n", 10,
-                      elem, pct);
-   }
-   num_elem = s->latency_writes_elems;
-   if (num_elem > 0) {
-       bytes_written += scnprintf(buf + bytes_written,
-              PAGE_SIZE - bytes_written,
-              "IO svc_time Write Latency Histogram (n = %llu):\n",
-              num_elem);
-       for (i = 0;
-            i < ARRAY_SIZE(latency_x_axis_us);
-            i++) {
-           elem = s->latency_y_axis_write[i];
-           pct = div64_u64(elem * 100, num_elem);
-           bytes_written += scnprintf(buf + bytes_written,
-                          PAGE_SIZE - bytes_written,
-                          "\t< %5lluus%15llu%15d%%\n",
-                          latency_x_axis_us[i],
-                          elem, pct);
-       }
-       /* Last element in y-axis table is overflow */
-       elem = s->latency_y_axis_write[i];
-       pct = div64_u64(elem * 100, num_elem);
-       bytes_written += scnprintf(buf + bytes_written,
-                      PAGE_SIZE - bytes_written,
-                      "\t> %5dms%15llu%15d%%\n", 10,
-                      elem, pct);
+   num_elem = s->latency_elems;
+   if (num_elem > 0) {
+       average = div64_u64(s->latency_sum, s->latency_elems);
+       bytes_written += scnprintf(buf + bytes_written,
+              buf_size - bytes_written,
+              "IO svc_time %s Latency Histogram (n = %llu,"
+              " average = %llu):\n", name, num_elem, average);
+       for (i = 0;
+            i < ARRAY_SIZE(latency_x_axis_us);
+            i++) {
+           elem = s->latency_y_axis[i];
+           pct = div64_u64(elem * 100, num_elem);
+           bytes_written += scnprintf(buf + bytes_written,
+                          PAGE_SIZE - bytes_written,
+                          "\t< %6lluus%15llu%15d%%\n",
+                          latency_x_axis_us[i],
+                          elem, pct);
+       }
+       /* Last element in y-axis table is overflow */
+       elem = s->latency_y_axis[i];
+       pct = div64_u64(elem * 100, num_elem);
+       bytes_written += scnprintf(buf + bytes_written,
+                      PAGE_SIZE - bytes_written,
+                      "\t>=%6lluus%15llu%15d%%\n",
+                      latency_x_axis_us[i - 1], elem, pct);
    }
    return bytes_written;
 }
 EXPORT_SYMBOL(blk_latency_hist_show);

@@ -4143,6 +4143,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
	 */
	{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+	{ "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

@@ -31,6 +31,7 @@
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
+#include <asm/nospec-branch.h>
#include "hyperv_vmbus.h"

/* The one and only */
@@ -103,9 +104,10 @@ static u64 do_hypercall(u64 control, void *input, void *output)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
-	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
+	__asm__ __volatile__(CALL_NOSPEC :
+			     "=a" (hv_status) :
			     "c" (control), "d" (input_address),
-			     "m" (hypercall_page));
+			     THUNK_TARGET(hypercall_page));

	return hv_status;
@@ -123,11 +125,12 @@ static u64 do_hypercall(u64 control, void *input, void *output)
	if (!hypercall_page)
		return (u64)ULLONG_MAX;

-	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+	__asm__ __volatile__ (CALL_NOSPEC : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
-			      "S"(output_address_lo), "m" (hypercall_page));
+			      "S"(output_address_lo),
+			      THUNK_TARGET(hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
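
The pattern in both hunks: an inline-asm indirect call ("call *%N") becomes CALL_NOSPEC, with the branch target handed over through the THUNK_TARGET() constraint so it reaches the retpoline thunk in the expected register. A minimal sketch under the same assumptions (x86_64, <asm/nospec-branch.h> available); indirect_call() and its operand list are illustrative only, and the real clobber set depends on the callee's ABI:

	#include <linux/types.h>
	#include <asm/nospec-branch.h>

	/* Hypothetical helper: indirect call into a code page (such as a
	 * hypercall page), routed through a retpoline thunk whenever
	 * CONFIG_RETPOLINE is enabled. */
	static u64 indirect_call(void *code_page, u64 control)
	{
		u64 status;

		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (status)
				     : "c" (control), THUNK_TARGET(code_page)
				     : "memory");
		return status;
	}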

@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
			 twl4030_vibra_suspend, twl4030_vibra_resume);

static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
-			  struct device_node *node)
+			  struct device_node *parent)
{
+	struct device_node *node;

	if (pdata && pdata->coexist)
		return true;

-	node = of_find_node_by_name(node, "codec");
+	node = of_get_child_by_name(parent, "codec");
	if (node) {
		of_node_put(node);
		return true;

@@ -262,7 +262,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
	int vddvibr_uV = 0;
	int error;

-	twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+	twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
						 "vibra");
	if (!twl6040_core_node) {
		dev_err(&pdev->dev, "parent of node is missing?\n");

@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
	int data, n, ret;

	if (!np)
		return -ENODEV;
-	np = of_find_node_by_name(np, "touch");
+	np = of_get_child_by_name(np, "touch");
	if (!np) {
		dev_err(&pdev->dev, "Can't find touch node\n");
		return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
	if (data) {
		ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
	}
	/* set tsi prebias time */
	if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
		ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
	}
	/* set prebias & prechg time of pen detect */
	data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
	if (data) {
		ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
		if (ret < 0)
-			return -EINVAL;
+			goto err_put_node;
	}
	of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);

+	of_node_put(np);
	return 0;
+
+err_put_node:
+	of_node_put(np);
+	return -EINVAL;
}
#else
#define pm860x_touch_dt_init(x, y, z) (-1)
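
All three input fixes replace the same misuse: of_find_node_by_name() searches the whole tree starting at (and releasing) the node it is given, so it can match an unrelated node elsewhere and unbalance refcounts, while of_get_child_by_name() matches direct children only and returns a reference the caller must drop. A minimal sketch of the corrected pattern (probe_child() and the "touch" child name are illustrative):

	#include <linux/of.h>

	static int probe_child(struct device_node *parent)
	{
		struct device_node *np;

		/* Direct children only; takes a reference on success. */
		np = of_get_child_by_name(parent, "touch");
		if (!np)
			return -ENODEV;

		/* ... read properties from np ... */

		of_node_put(np);	/* balance the reference */
		return 0;
	}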

@@ -81,10 +81,14 @@
#define SECTOR_TO_BLOCK_SHIFT 3

/*
+ * For btree insert:
 *  3 for btree insert +
 *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
 */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

@@ -671,23 +671,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

-	/*
-	 * rejig the spine. This is ugly, since it knows too
-	 * much about the spine
-	 */
-	if (s->nodes[0] != new_parent) {
-		unlock_block(s->info, s->nodes[0]);
-		s->nodes[0] = new_parent;
-	}
-	if (key < le64_to_cpu(rn->keys[0])) {
-		unlock_block(s->info, right);
-		s->nodes[1] = left;
-	} else {
-		unlock_block(s->info, left);
-		s->nodes[1] = right;
-	}
-	s->count = 2;
+	unlock_block(s->info, left);
+	unlock_block(s->info, right);

	return 0;
}

@@ -1029,9 +1029,10 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
				completion = ktime_get();
				delta_us = ktime_us_delta(completion,
							  mrq->io_start);
-				blk_update_latency_hist(&host->io_lat_s,
-					(mrq->data->flags & MMC_DATA_READ),
-					delta_us);
+				blk_update_latency_hist(
+					(mrq->data->flags & MMC_DATA_READ) ?
+					&host->io_lat_read :
+					&host->io_lat_write, delta_us);
			}
#endif
		trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
@@ -4588,8 +4589,14 @@ static ssize_t
latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	size_t written_bytes;

-	return blk_latency_hist_show(&host->io_lat_s, buf);
+	written_bytes = blk_latency_hist_show("Read", &host->io_lat_read,
+			buf, PAGE_SIZE);
+	written_bytes += blk_latency_hist_show("Write", &host->io_lat_write,
+			buf + written_bytes, PAGE_SIZE - written_bytes);
+
+	return written_bytes;
}

/*
@@ -4607,9 +4614,10 @@ latency_hist_store(struct device *dev, struct device_attribute *attr,
	if (kstrtol(buf, 0, &value))
		return -EINVAL;

-	if (value == BLK_IO_LAT_HIST_ZERO)
-		blk_zero_latency_hist(&host->io_lat_s);
-	else if (value == BLK_IO_LAT_HIST_ENABLE ||
+	if (value == BLK_IO_LAT_HIST_ZERO) {
+		memset(&host->io_lat_read, 0, sizeof(host->io_lat_read));
+		memset(&host->io_lat_write, 0, sizeof(host->io_lat_write));
+	} else if (value == BLK_IO_LAT_HIST_ENABLE ||
		 value == BLK_IO_LAT_HIST_DISABLE)
		host->latency_hist_enabled = value;
	return count;

@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
	void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
	int err = 0;
	u8 *packet_ptr;
-	int i, n = 1, packet_len;
+	int packet_len;
	ptrdiff_t cmd_len;

	/* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
	}

	packet_ptr = cmd_head;
+	packet_len = cmd_len;

	/* firmware is not able to re-assemble 512 bytes buffer in full-speed */
-	if ((dev->udev->speed != USB_SPEED_HIGH) &&
-	    (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
-		packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
-		n += cmd_len / packet_len;
-	} else {
-		packet_len = cmd_len;
-	}
+	if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+		packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);

-	for (i = 0; i < n; i++) {
+	do {
		err = usb_bulk_msg(dev->udev,
				   usb_sndbulkpipe(dev->udev,
						   PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
		}

		packet_ptr += packet_len;
-	}
+		cmd_len -= packet_len;
+
+		if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+			packet_len = cmd_len;
+	} while (packet_len > 0);

	return err;
}
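
The old code precomputed a packet count (n += cmd_len / packet_len) and then sent n full-sized packets, which could run past the end of the command buffer whenever cmd_len was not a multiple of the packet size. The do/while version instead shrinks the final chunk to whatever remains. A userspace model of the new control flow (the 64-byte PCAN_UFD_LOSPD_PKT_SIZE value is assumed here for illustration):

	#include <stdio.h>
	#include <stddef.h>

	#define PCAN_UFD_LOSPD_PKT_SIZE 64	/* assumed value */

	static void send_fragments(ptrdiff_t cmd_len)
	{
		int packet_len = (int)cmd_len;

		if (packet_len > PCAN_UFD_LOSPD_PKT_SIZE)
			packet_len = PCAN_UFD_LOSPD_PKT_SIZE;

		do {
			printf("bulk packet: %d bytes\n", packet_len);
			cmd_len -= packet_len;
			if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
				packet_len = (int)cmd_len;
		} while (packet_len > 0);
	}

	int main(void)
	{
		send_fragments(130);	/* 64 + 64 + 2: no tail overrun */
		return 0;
	}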

@@ -365,6 +365,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
	if (ret)
		return ERR_PTR(-ENODEV);

+	/* This phy type handled by the usb-phy subsystem for now */
+	if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+		return ERR_PTR(-ENODEV);
+
	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {

@@ -3638,6 +3638,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
	if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
		hpsa_get_ioaccel_status(h, scsi3addr, this_device);
	volume_offline = hpsa_volume_offline(h, scsi3addr);
+	this_device->volume_offline = volume_offline;
	if (volume_offline == HPSA_LV_FAILED) {
		rc = HPSA_LV_FAILED;
		dev_err(&h->pdev->dev,

@@ -160,7 +160,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
	struct list_head rq_list; /* head of request list */
	struct fasync_struct *async_qp; /* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
-	char low_dma; /* as in parent but possibly overridden to 1 */
	char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
	char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
@@ -934,26 +933,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
		/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
-		result = get_user(val, ip);
-		if (result)
-			return result;
-		if (val) {
-			sfp->low_dma = 1;
-			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
-				val = (int) sfp->reserve.bufflen;
-				mutex_lock(&sfp->parentdp->open_rel_lock);
-				sg_remove_scat(sfp, &sfp->reserve);
-				sg_build_reserve(sfp, val);
-				mutex_unlock(&sfp->parentdp->open_rel_lock);
-			}
-		} else {
-			if (atomic_read(&sdp->detaching))
-				return -ENODEV;
-			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
-		}
+		/*
+		 * N.B. This ioctl never worked properly, but failed to
+		 * return an error value. So returning '0' to keep
+		 * compatibility with legacy applications.
+		 */
		return 0;
	case SG_GET_LOW_DMA:
-		return put_user((int) sfp->low_dma, ip);
+		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
@@ -1872,6 +1859,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+	struct sg_device *sdp = sfp->parentdp;

	if (blk_size < 0)
		return -EFAULT;
@@ -1897,7 +1885,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
		scatter_elem_sz_prev = num;
	}

-	if (sfp->low_dma)
+	if (sdp->device->host->unchecked_isa_dma)
		gfp_mask |= GFP_DMA;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -2160,8 +2148,6 @@ sg_add_sfp(Sg_device * sdp)
	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
-	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
-	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
@@ -2620,7 +2606,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
-			   (int) fp->low_dma);
+			   (int) sdp->device->host->unchecked_isa_dma);
		seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan);

@@ -5621,10 +5621,10 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
				completion = ktime_get();
				delta_us = ktime_us_delta(completion,
					req->lat_hist_io_start);
-				/* rq_data_dir() => true if WRITE */
-				blk_update_latency_hist(&hba->io_lat_s,
-					(rq_data_dir(req) == READ),
-					delta_us);
+				blk_update_latency_hist(
+					(rq_data_dir(req) == READ) ?
+					&hba->io_lat_read :
+					&hba->io_lat_write, delta_us);
			}
		}
		/* Do not touch lrbp after scsi done */
@@ -9310,9 +9310,10 @@ latency_hist_store(struct device *dev, struct device_attribute *attr,
	if (kstrtol(buf, 0, &value))
		return -EINVAL;

-	if (value == BLK_IO_LAT_HIST_ZERO)
-		blk_zero_latency_hist(&hba->io_lat_s);
-	else if (value == BLK_IO_LAT_HIST_ENABLE ||
+	if (value == BLK_IO_LAT_HIST_ZERO) {
+		memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
+		memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
+	} else if (value == BLK_IO_LAT_HIST_ENABLE ||
		 value == BLK_IO_LAT_HIST_DISABLE)
		hba->latency_hist_enabled = value;
	return count;
@@ -9323,8 +9324,14 @@ latency_hist_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
+	size_t written_bytes;

-	return blk_latency_hist_show(&hba->io_lat_s, buf);
+	written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
+			buf, PAGE_SIZE);
+	written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
+			buf + written_bytes, PAGE_SIZE - written_bytes);
+
+	return written_bytes;
}

static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,

@@ -954,7 +954,8 @@ struct ufs_hba {
	struct pinctrl *pctrl;

	int latency_hist_enabled;
-	struct io_latency_state io_lat_s;
+	struct io_latency_state io_lat_read;
+	struct io_latency_state io_lat_write;

	bool restore_needed;
};

@@ -1001,6 +1001,9 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

+	if (!nr_pages)
+		return -EINVAL;
+
	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
@@ -1045,13 +1048,19 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)

/*
 * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
+ * of pages. Returns 0 on error.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

+	if (size < pipe_min_size)
+		size = pipe_min_size;
+
	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages == 0)
+		return 0;
+
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}

@@ -1062,13 +1071,18 @@ static inline unsigned int round_pipe_size(unsigned int size)
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
+	unsigned int rounded_pipe_max_size;
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

-	pipe_max_size = round_pipe_size(pipe_max_size);
+	rounded_pipe_max_size = round_pipe_size(pipe_max_size);
+	if (rounded_pipe_max_size == 0)
+		return -EINVAL;
+
+	pipe_max_size = rounded_pipe_max_size;
	return ret;
}
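
A worked example of the overflow being guarded: round_pipe_size() does this arithmetic in unsigned long, which is 32 bits wide on the affected systems, so a requested size within one page of UINT_MAX wraps the sum to zero pages, and roundup_pow_of_two(0) is undefined. A userspace demonstration using 32-bit arithmetic (4 KiB pages assumed):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE (1U << PAGE_SHIFT)

	int main(void)
	{
		/* unsigned int stands in for a 32-bit unsigned long */
		unsigned int size = 0xfffff000;	/* largest aligned size */
		unsigned int nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

		printf("size=%#x -> nr_pages=%u\n", size, nr_pages); /* 1048575 */

		size = 0xfffff001;	/* one byte more: the sum wraps */
		nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		printf("size=%#x -> nr_pages=%u\n", size, nr_pages); /* 0 */
		return 0;
	}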

@@ -641,7 +641,7 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
	 */
	copy_attrs(&tmp, inode);
	tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
-	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, inode->i_sb, top));
	tmp.i_mode = (inode->i_mode & S_IFMT)
			| get_mode(mnt, SDCARDFS_I(inode), top);
	data_put(top);
@@ -718,7 +718,7 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
	 */
	copy_attrs(&tmp, inode);
	tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
-	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, dentry->d_sb, top));
	tmp.i_mode = (inode->i_mode & S_IFMT)
			| get_mode(mnt, SDCARDFS_I(inode), top);
	tmp.i_size = i_size_read(inode);
@@ -819,6 +819,7 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
{
	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
	struct sdcardfs_inode_data *top = top_data_get(info);
+	struct super_block *sb = inode->i_sb;

	if (!top)
		return -EINVAL;
@@ -828,7 +829,7 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
	stat->mode = (inode->i_mode & S_IFMT) | get_mode(mnt, info, top);
	stat->nlink = inode->i_nlink;
	stat->uid = make_kuid(&init_user_ns, top->d_uid);
-	stat->gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+	stat->gid = make_kgid(&init_user_ns, get_gid(mnt, sb, top));
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;

@@ -70,7 +70,7 @@ static int parse_options(struct super_block *sb, char *options, int silent,
	opts->reserved_mb = 0;
	/* by default, gid derivation is off */
	opts->gid_derivation = false;
-	vfsopts->default_normal = false;
+	opts->default_normal = false;
	*debug = 0;
@@ -126,7 +126,7 @@ static int parse_options(struct super_block *sb, char *options, int silent,
			opts->gid_derivation = true;
			break;
		case Opt_default_normal:
-			vfsopts->default_normal = true;
+			opts->default_normal = true;
			break;
		/* unknown option */
		default:

@@ -220,13 +220,13 @@ struct sdcardfs_mount_options {
	userid_t fs_user_id;
	bool multiuser;
	bool gid_derivation;
+	bool default_normal;
	unsigned int reserved_mb;
};

struct sdcardfs_vfsmount_options {
	gid_t gid;
	mode_t mask;
-	bool default_normal;
};

extern int parse_options_remount(struct super_block *sb, char *options, int silent,
@@ -414,11 +414,13 @@ static inline void set_top(struct sdcardfs_inode_info *info,
}

static inline int get_gid(struct vfsmount *mnt,
+		struct super_block *sb,
		struct sdcardfs_inode_data *data)
{
-	struct sdcardfs_vfsmount_options *opts = mnt->data;
+	struct sdcardfs_vfsmount_options *vfsopts = mnt->data;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(sb);

-	if (opts->gid == AID_SDCARD_RW && !opts->default_normal)
+	if (vfsopts->gid == AID_SDCARD_RW && !sbi->options.default_normal)
		/* As an optimization, certain trusted system components only run
		 * as owner but operate across all users. Since we're now handing
		 * out the sdcard_rw GID only to trusted apps, we're okay relaxing
@@ -427,7 +429,7 @@ static inline int get_gid(struct vfsmount *mnt,
		 */
		return AID_SDCARD_RW;
	else
-		return multiuser_get_uid(data->userid, opts->gid);
+		return multiuser_get_uid(data->userid, vfsopts->gid);
}

static inline int get_mode(struct vfsmount *mnt,

@@ -304,7 +304,7 @@ static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
		seq_printf(m, ",userid=%u", opts->fs_user_id);
	if (opts->gid_derivation)
		seq_puts(m, ",derive_gid");
-	if (vfsopts->default_normal)
+	if (opts->default_normal)
		seq_puts(m, ",default_normal");
	if (opts->reserved_mb != 0)
		seq_printf(m, ",reserved=%uMB", opts->reserved_mb);

(new file)

@@ -0,0 +1,7 @@
#include <linux/bitops.h>
extern void *__memset(void *, int, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);

(new file)

@@ -0,0 +1,94 @@
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
#ifndef KSYM_FUNC
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 8
#endif
#else
#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
#ifndef KCRC_ALIGN
#define KCRC_ALIGN 4
#endif
#endif
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define KSYM(name) _##name
#else
#define KSYM(name) name
#endif
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
.globl KSYM(__ksymtab_\name)
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
KSYM(__ksymtab_\name):
__put \val, KSYM(__kstrtab_\name)
.previous
.section __ksymtab_strings,"a"
KSYM(__kstrtab_\name):
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
.asciz "_\name"
#else
.asciz "\name"
#endif
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
KSYM(__kcrctab_\name):
__put KSYM(__crc_\name)
.weak KSYM(__crc_\name)
.previous
#endif
#endif
.endm
#undef __put
#if defined(__KSYM_DEPS__)
#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
#include <linux/kconfig.h>
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, val, sec) \
__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf) \
___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled) \
__cond_export_sym_##enabled(sym, val, sec)
#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#define __cond_export_sym_0(sym, val, sec) /* nothing */
#else
#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
#endif
#define EXPORT_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
#define EXPORT_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
#define EXPORT_DATA_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM(name),)
#define EXPORT_DATA_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM(name),_gpl)
#endif

@@ -1700,43 +1700,26 @@ static const u_int64_t latency_x_axis_us[] = {
#define BLK_IO_LAT_HIST_ZERO 2

struct io_latency_state {
-	u_int64_t latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
-	u_int64_t latency_reads_elems;
-	u_int64_t latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
-	u_int64_t latency_writes_elems;
+	u_int64_t latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
+	u_int64_t latency_elems;
+	u_int64_t latency_sum;
};

static inline void
-blk_update_latency_hist(struct io_latency_state *s,
-			int read,
-			u_int64_t delta_us)
+blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
{
	int i;

-	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
-		if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
-			if (read)
-				s->latency_y_axis_read[i]++;
-			else
-				s->latency_y_axis_write[i]++;
-			break;
-		}
-	}
-	if (i == ARRAY_SIZE(latency_x_axis_us)) {
-		/* Overflowed the histogram */
-		if (read)
-			s->latency_y_axis_read[i]++;
-		else
-			s->latency_y_axis_write[i]++;
-	}
-	if (read)
-		s->latency_reads_elems++;
-	else
-		s->latency_writes_elems++;
+	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
+		if (delta_us < (u_int64_t)latency_x_axis_us[i])
+			break;
+	s->latency_y_axis[i]++;
+	s->latency_elems++;
+	s->latency_sum += delta_us;
}

-void blk_zero_latency_hist(struct io_latency_state *s);
-ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
+ssize_t blk_latency_hist_show(char* name, struct io_latency_state *s,
+		char *buf, int buf_size);

#else /* CONFIG_BLOCK */
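
With the read/write split moved out of the struct, drivers instantiate one io_latency_state per direction and the helper becomes direction-agnostic; the printed average now comes from latency_sum / latency_elems. A sketch of the resulting caller-side pattern (kernel context assumed; the lat_read/lat_write variables and both helper functions are illustrative):

	#include <linux/blkdev.h>	/* io_latency_state and friends */

	static struct io_latency_state lat_read, lat_write;

	static void record_io_latency(bool is_read, u_int64_t delta_us)
	{
		blk_update_latency_hist(is_read ? &lat_read : &lat_write,
					delta_us);
	}

	static ssize_t show_latency(char *buf)
	{
		ssize_t n;

		n = blk_latency_hist_show("Read", &lat_read, buf, PAGE_SIZE);
		n += blk_latency_hist_show("Write", &lat_write, buf + n,
					   PAGE_SIZE - n);
		return n;
	}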

@@ -17,10 +17,11 @@
 * the last step cherry picks the 2nd arg, we get a zero.
 */
#define __ARG_PLACEHOLDER_1 0,
-#define config_enabled(cfg) _config_enabled(cfg)
-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
-#define ___config_enabled(__ignored, val, ...) val
+#define config_enabled(cfg) ___is_defined(cfg)
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+#define __take_second_arg(__ignored, val, ...) val

/*
 * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
@@ -42,7 +43,7 @@
 * built-in code when CONFIG_FOO is set to 'm'.
 */
#define IS_REACHABLE(option) (config_enabled(option) || \
-		 (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+		 (config_enabled(option##_MODULE) && __is_defined(MODULE)))

/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
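
The rename also makes the trick easy to demonstrate standalone: when a name is defined as 1, __ARG_PLACEHOLDER_##1 pastes to "0,", the probe expands to two arguments, and the second-argument picker yields 1; for an undefined name nothing pastes and the trailing 0 is picked. A userspace copy of the macros showing both cases:

	#include <stdio.h>

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)

	#define FOO 1	/* defined to 1, like -DMODULE */

	int main(void)
	{
		printf("%d %d\n", __is_defined(FOO), __is_defined(BAR));
		return 0;	/* prints "1 0" */
	}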

@@ -595,7 +595,8 @@ struct mmc_host {
	struct mmc_request *err_mrq;
#ifdef CONFIG_BLOCK
	int latency_hist_enabled;
-	struct io_latency_state io_lat_s;
+	struct io_latency_state io_lat_read;
+	struct io_latency_state io_lat_write;
#endif

	bool sdr104_wa;

@@ -24,10 +24,16 @@
#ifndef MODULE_ARCH_VERMAGIC
#define MODULE_ARCH_VERMAGIC ""
#endif
+#ifdef RETPOLINE
+#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+#else
+#define MODULE_VERMAGIC_RETPOLINE ""
+#endif

#define VERMAGIC_STRING \
	UTS_RELEASE " " \
	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
-	MODULE_ARCH_VERMAGIC
+	MODULE_ARCH_VERMAGIC \
+	MODULE_VERMAGIC_RETPOLINE

@@ -198,7 +198,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
#define SG_DEFAULT_RETRIES 0

/* Defaults, commented if they differ from original sg driver */
-#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
#define SG_DEF_FORCE_PACK_ID 0
#define SG_DEF_KEEP_ORPHAN 0
#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */

@@ -1621,6 +1621,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
	struct futex_q *this, *next;
	WAKE_Q(wake_q);

+	if (nr_wake < 0 || nr_requeue < 0)
+		return -EINVAL;
+
	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This

@@ -37,6 +37,7 @@ config ARCH_HAS_GCOV_PROFILE_ALL

config GCOV_PROFILE_ALL
	bool "Profile entire Kernel"
+	depends on !COMPILE_TEST
	depends on GCOV_KERNEL
	depends on ARCH_HAS_GCOV_PROFILE_ALL
	default n

@@ -757,6 +757,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
+		if (dl_se->runtime > 0)
+			dl_se->runtime = 0;
	}
}

@@ -10818,7 +10818,7 @@ static int active_load_balance_cpu_stop(void *data)
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd = NULL;
	struct task_struct *p = NULL;
-	struct task_struct *push_task;
+	struct task_struct *push_task = NULL;
	int push_task_detached = 0;
	struct lb_env env = {
		.sd = sd,

@@ -2301,6 +2301,7 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
+	bool first = false;
	int last_i;
	int i;

@@ -2308,15 +2309,28 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
+			first = true;
			last_i = 0;
			last_system = call->class->system;
		}

+		/*
+		 * Since calls are grouped by systems, the likelihood that the
+		 * next call in the iteration belongs to the same system as the
+		 * previous call is high. As an optimization, we skip searching
+		 * for a map[] that matches the call's system if the last call
+		 * was from the same system. That's what last_i is for. If the
+		 * call has the same system as the previous call, then last_i
+		 * will be the index of the first map[] that has a matching
+		 * system.
+		 */
		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
-				if (!last_i)
+				if (first) {
					last_i = i;
+					first = false;
+				}
				update_event_printk(call, map[i]);
			}
		}

@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
#endif
	int len;

+	if (sp->sadb_address_len <
+	    DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
+			 sizeof(uint64_t)))
+		return -EINVAL;
+
	switch (addr->sa_family) {
	case AF_INET:
		len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
		uint16_t ext_type;
		int ext_len;

+		if (len < sizeof(*ehdr))
+			return -EINVAL;
+
		ext_len = ehdr->sadb_ext_len;
		ext_len *= sizeof(uint64_t);
		ext_type = ehdr->sadb_ext_type;
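
The first check rejects messages whose claimed extension length (counted in 8-byte units) cannot even cover the address header plus the sa_family field that the switch below is about to read. A userspace model of the arithmetic, with miniature stand-ins for the sadb_address and sockaddr layouts (only the sizes matter here):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	struct hdr { uint64_t placeholder; };	/* ~ 8-byte struct sadb_address */
	struct sa { uint16_t sa_family; char sa_data[14]; };	/* ~ struct sockaddr */

	int main(void)
	{
		size_t min_units = DIV_ROUND_UP(sizeof(struct hdr) +
				offsetofend(struct sa, sa_family),
				sizeof(uint64_t));

		printf("reject sadb_address_len < %zu\n", min_units); /* 2 */
		return 0;
	}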

@@ -163,7 +163,8 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $<
$(obj)/%.i: $(src)/%.c FORCE
	$(call if_changed_dep,cc_i_c)

-cmd_gensymtypes = \
+# These mirror gensymtypes_S and co below, keep them in sync.
+cmd_gensymtypes_c = \
    $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
    $(GENKSYMS) $(if $(1), -T $(2)) \
     $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
@@ -173,7 +174,7 @@ cmd_gensymtypes = \
quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
cmd_cc_symtypes_c = \
    set -e; \
-    $(call cmd_gensymtypes,true,$@) >/dev/null; \
+    $(call cmd_gensymtypes_c,true,$@) >/dev/null; \
    test -s $@ || rm -f $@

$(obj)/%.symtypes : $(src)/%.c FORCE
@@ -210,9 +211,10 @@ else
# the actual value of the checksum generated by genksyms

cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
-cmd_modversions = \
+
+cmd_modversions_c = \
	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
-		$(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+		$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
		    > $(@D)/.tmp_$(@F:.o=.ver); \
		\
		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
@@ -257,7 +259,7 @@ endif
define rule_cc_o_c
	$(call echo-cmd,checksrc) $(cmd_checksrc) \
	$(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \
-	$(cmd_modversions) \
+	$(cmd_modversions_c) \
	$(call echo-cmd,record_mcount) \
	$(cmd_record_mcount) \
	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \
@@ -266,6 +268,15 @@ define rule_cc_o_c
	mv -f $(dot-target).tmp $(dot-target).cmd
endef

+define rule_as_o_S
+	$(call echo-cmd,as_o_S) $(cmd_as_o_S); \
+	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' > \
+		$(dot-target).tmp; \
+	$(cmd_modversions_S) \
+	rm -f $(depfile); \
+	mv -f $(dot-target).tmp $(dot-target).cmd
+endef
+
# Built-in and composite module parts
$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
	$(call cmd,force_checksrc)
@@ -294,6 +305,38 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)
$(real-objs-m)      : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
$(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)

+# .S file exports must have their C prototypes defined in asm/asm-prototypes.h
+# or a file that it includes, in order to get versioned symbols. We build a
+# dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from
+# the .S file (with trailing ';'), and run genksyms on that, to extract vers.
+#
+# This is convoluted. The .S file must first be preprocessed to run guards and
+# expand names, then the resulting exports must be constructed into plain
+# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed
+# to make the genksyms input.
+#
+# These mirror gensymtypes_c and co above, keep them in sync.
+cmd_gensymtypes_S = \
+    (echo "\#include <linux/kernel.h>" ; \
+     echo "\#include <asm/asm-prototypes.h>" ; \
+    $(CPP) $(a_flags) $< | \
+     grep "\<___EXPORT_SYMBOL\>" | \
+     sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
+    $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
+    $(GENKSYMS) $(if $(1), -T $(2)) \
+     $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+     $(if $(KBUILD_PRESERVE),-p) \
+     -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
+
+quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
+cmd_cc_symtypes_S = \
+    set -e; \
+    $(call cmd_gensymtypes_S,true,$@) >/dev/null; \
+    test -s $@ || rm -f $@
+
+$(obj)/%.symtypes : $(src)/%.S FORCE
+	$(call cmd,cc_symtypes_S)
+
quiet_cmd_as_s_S = CPP $(quiet_modtag) $@
cmd_as_s_S = $(CPP) $(a_flags) -o $@ $<

@@ -301,10 +344,40 @@ $(obj)/%.s: $(src)/%.S FORCE
	$(call if_changed_dep,as_s_S)

quiet_cmd_as_o_S = AS $(quiet_modtag) $@
-cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+ifndef CONFIG_MODVERSIONS
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h)
+
+ifeq ($(ASM_PROTOTYPES),)
+cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
+
+else
+
+# versioning matches the C process described above, with difference that
+# we parse asm-prototypes.h C header to get function definitions.
+
+cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $<
+
+cmd_modversions_S = \
+	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
+		$(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
+		    > $(@D)/.tmp_$(@F:.o=.ver); \
+		\
+		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
+			-T $(@D)/.tmp_$(@F:.o=.ver); \
+		rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \
+	else \
+		mv -f $(@D)/.tmp_$(@F) $@; \
+	fi;
+endif
+endif

$(obj)/%.o: $(src)/%.S FORCE
-	$(call if_changed_dep,as_o_S)
+	$(call if_changed_rule,as_o_S)

targets += $(real-objs-y) $(real-objs-m) $(lib-y)
targets += $(extra-y) $(MAKECMDGOALS) $(always)

@@ -582,7 +582,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
{
	u_int64_t n = (u_int64_t) a * b;
	if (c == 0) {
-		snd_BUG_ON(!n);
		*r = 0;
		return UINT_MAX;
	}

@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
	/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/

	/* codec SSID */
+	SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),

@@ -5600,6 +5600,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),