This patch introduces our implementation of KAISER (Kernel Address Isolation to
have Side-channels Efficiently Removed), a kernel isolation technique to close
hardware side channels on kernel address information.

More information about the patch can be found on:

        https://github.com/IAIK/KAISER

From: Richard Fellner <richard.fellner@student.tugraz.at>
From: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
X-Subject: [RFC, PATCH] x86_64: KAISER - do not map kernel in user mode
Date: Thu, 4 May 2017 14:26:50 +0200
Link: http://marc.info/?l=linux-kernel&m=149390087310405&w=2
Kaiser-4.10-SHA1: c4b1831d44c6144d3762ccc72f0c4e71a0c713e5
To: <linux-kernel@vger.kernel.org>
To: <kernel-hardening@lists.openwall.com>
Cc: <clementine.maurice@iaik.tugraz.at>
Cc: <moritz.lipp@iaik.tugraz.at>
Cc: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
Cc: Richard Fellner <richard.fellner@student.tugraz.at>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: <kirill.shutemov@linux.intel.com>
Cc: <anders.fogh@gdata-adan.de>

After several recent works [1,2,3] KASLR on x86_64 was basically considered
dead by many researchers. We have been working on an efficient but effective
fix for this problem and found that not mapping the kernel space when running
in user mode is the solution to this problem [4] (the corresponding paper [5]
will be presented at ESSoS17).

With this RFC patch we allow anybody to configure their kernel with the flag
CONFIG_KAISER to add our defense mechanism.

If there are any questions we would love to answer them. We also appreciate
any comments!

Cheers,
Daniel (+ the KAISER team from Graz University of Technology)

[1] http://www.ieee-security.org/TC/SP2013/papers/4977a191.pdf
[2] https://www.blackhat.com/docs/us-16/materials/us-16-Fogh-Using-Undocumented-CPU-Behaviour-To-See-Into-Kernel-Mode-And-Break-KASLR-In-The-Process.pdf
[3] https://www.blackhat.com/docs/us-16/materials/us-16-Jang-Breaking-Kernel-Address-Space-Layout-Randomization-KASLR-With-Intel-TSX.pdf
[4] https://github.com/IAIK/KAISER
[5] https://gruss.cc/files/kaiser.pdf

[patch based also on
https://raw.githubusercontent.com/IAIK/KAISER/master/KAISER/0001-KAISER-Kernel-Address-Isolation.patch]

Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
#ifndef _ASM_X86_KAISER_H
#define _ASM_X86_KAISER_H

/*
 * This file contains the definitions for the KAISER feature.
 *
 * KAISER is a countermeasure against x86_64 side-channel attacks on the
 * kernel's virtual memory.  It maintains a shadow pgd for every process:
 * the shadow pgd maps only a minimal set of kernel code and data, but
 * includes the whole of user memory.  On a context switch into the
 * kernel, or when an interrupt is handled, the pgd is switched to the
 * normal (full) one; when the system returns to user mode, the shadow
 * pgd is loaded again.  As a result, kernel addresses are not reachable
 * through the virtual-memory caches while user code runs, and user
 * space cannot attack the whole kernel address space.
 *
 * The minimal kernel part of the shadow mapping holds only what has to
 * be mapped in user mode, such as the entry/exit code of user space and
 * the stacks.
 */

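/*
 * Illustration only (not part of the original KAISER patch): in C terms,
 * the assembly macros below amount to the sequence sketched here.  The
 * sketch assumes, as the 0x1000 masks below imply, that the kernel pgd
 * and the shadow pgd are two adjacent 4k pages, so that bit 12 of the
 * CR3 value selects between them; read_cr3()/write_cr3() are the
 * pre-4.12 helpers available in this kernel generation.
 *
 *	unsigned long cr3 = read_cr3();
 *
 *	cr3 &= ~0x1000UL;	// clear bit 12: switch to the kernel pgd
 *	write_cr3(cr3);		// full kernel mapping now active
 *
 *	cr3 = read_cr3();
 *	cr3 |= 0x1000UL;	// set bit 12: switch to the shadow pgd
 *	write_cr3(cr3);		// minimal mapping for user mode
 */
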
#ifdef __ASSEMBLY__
#ifdef CONFIG_KAISER

/* Switch to the kernel copy of the page tables: clear bit 12 of CR3. */
.macro _SWITCH_TO_KERNEL_CR3 reg
	movq %cr3, \reg
	andq $(~0x1000), \reg
	movq \reg, %cr3
.endm

/* Switch to the shadow (user) copy of the page tables: set bit 12 of CR3. */
.macro _SWITCH_TO_USER_CR3 reg
	movq %cr3, \reg
	orq $(0x1000), \reg
	movq \reg, %cr3
.endm

/* %rax is used as scratch register and preserved via the stack. */
.macro SWITCH_KERNEL_CR3
	pushq %rax
	_SWITCH_TO_KERNEL_CR3 %rax
	popq %rax
.endm

.macro SWITCH_USER_CR3
	pushq %rax
	_SWITCH_TO_USER_CR3 %rax
	popq %rax
.endm

/*
 * The _NO_STACK variants are for paths where the address space has to be
 * switched before a register can be saved on the stack; %rax is preserved
 * in a per-cpu variable instead.
 */
.macro SWITCH_KERNEL_CR3_NO_STACK
	movq %rax, PER_CPU_VAR(unsafe_stack_register_backup)
	_SWITCH_TO_KERNEL_CR3 %rax
	movq PER_CPU_VAR(unsafe_stack_register_backup), %rax
.endm

.macro SWITCH_USER_CR3_NO_STACK
	movq %rax, PER_CPU_VAR(unsafe_stack_register_backup)
	_SWITCH_TO_USER_CR3 %rax
	movq PER_CPU_VAR(unsafe_stack_register_backup), %rax
.endm

#else /* CONFIG_KAISER */

.macro SWITCH_KERNEL_CR3 reg
.endm
.macro SWITCH_USER_CR3 reg
.endm
.macro SWITCH_USER_CR3_NO_STACK
.endm
.macro SWITCH_KERNEL_CR3_NO_STACK
.endm

#endif /* CONFIG_KAISER */

#else /* __ASSEMBLY__ */

#ifdef CONFIG_KAISER

/*
 * On a kernel/user mode switch it may happen that the address space has
 * to be switched before the registers have been stored.  Changing the
 * address space needs a scratch register, so that register has to be
 * stored and restored somewhere other than the stack: a per-cpu backup
 * slot that is itself part of the user-mapped area.
 */
DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);

#endif /* CONFIG_KAISER */

/**
 * kaiser_add_mapping - map a virtual memory range into the shadow mapping
 * @addr: the start address of the range
 * @size: the size of the range
 * @flags: the mapping flags of the pages
 *
 * The mapping is done at a global scope, so no additional synchronization
 * is required.  The pages have to be unmapped manually when they are no
 * longer needed.
 */
extern void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags);

/**
 * kaiser_remove_mapping - unmap a virtual memory range from the shadow mapping
 * @start: the start address of the range
 * @size: the size of the range
 */
extern void kaiser_remove_mapping(unsigned long start, unsigned long size);

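/*
 * Usage sketch (hypothetical caller, not part of this patch): an object
 * that must stay visible while the shadow pgd is active can be added to
 * the shadow page tables with kaiser_add_mapping() and dropped again
 * with kaiser_remove_mapping().  The buffer name is made up for the
 * illustration; __PAGE_KERNEL is assumed as the usual page-flag value.
 *
 *	static char entry_scratch[PAGE_SIZE] __aligned(PAGE_SIZE);
 *
 *	kaiser_add_mapping((unsigned long)entry_scratch, PAGE_SIZE,
 *			   __PAGE_KERNEL);
 *	...
 *	kaiser_remove_mapping((unsigned long)entry_scratch, PAGE_SIZE);
 */
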
/**
 * kaiser_init - initialize the shadow mapping
 *
 * Most parts of the shadow mapping can be mapped at boot time.
 * Only the thread stacks have to be mapped at runtime.
 * Regions mapped here are never unmapped again.
 */
extern void kaiser_init(void);

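/*
 * Illustration of the runtime case mentioned above (hypothetical call
 * site, not taken from this patch): a freshly allocated thread stack
 * would be added to the shadow mapping roughly like this; tsk and the
 * use of task_stack_page()/THREAD_SIZE are assumed for the sketch.
 *
 *	kaiser_add_mapping((unsigned long)task_stack_page(tsk),
 *			   THREAD_SIZE, __PAGE_KERNEL);
 */
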
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KAISER_H */