BACKPORT: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code uses the TTBR1_EL1 value
adjusted by the reserved_ttbr0 offset.

Enabling access to user space is done by restoring TTBR0_EL1 with the
value from the struct thread_info ttbr0 variable. Interrupts must be
disabled during the uaccess_ttbr0_enable code to ensure the atomicity
of the thread_info.ttbr0 read and the TTBR0_EL1 write.

This patch also moves the get_thread_info asm macro from entry.S to
assembler.h for reuse in the uaccess_ttbr0_* macros.

Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Bug: 31432001
Change-Id: I54ada623160cb47f5762e0e39a5e84a75252dbfd
(cherry picked from commit 4b65a5db362783ab4b04ca1c1d2ad70ed9b0ba2a)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
commit 8d2de42235 (parent d9c25e86cd)
10 changed files with 146 additions and 13 deletions
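Before the per-file hunks, a minimal C restatement of the mechanism described
in the commit message may help orientation. It is illustrative only and is
condensed from the arch/arm64/include/asm/uaccess.h hunk below; the function
names here are hypothetical, the real helpers being
__uaccess_ttbr0_disable()/__uaccess_ttbr0_enable().

	/*
	 * Illustrative sketch only; condensed from the uaccess.h hunk in
	 * this patch. Function names are hypothetical.
	 */
	#include <asm/barrier.h>
	#include <asm/kernel-pgtable.h>
	#include <asm/sysreg.h>
	#include <linux/irqflags.h>
	#include <linux/thread_info.h>

	static inline void sw_pan_block_user_access(void)
	{
		/* reserved_ttbr0 sits right after swapper_pg_dir, so derive it from TTBR1_EL1 */
		write_sysreg(read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE, ttbr0_el1);
		isb();
	}

	static inline void sw_pan_allow_user_access(void)
	{
		unsigned long flags;

		/* IRQs off: thread_info.ttbr0 must not change between the read and the MSR */
		local_irq_save(flags);
		write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
		isb();
		local_irq_restore(flags);
	}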
arch/arm64/include/asm/assembler.h
@@ -53,6 +53,15 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -371,6 +380,13 @@ alternative_endif
 	movk	\reg, :abs_g0_nc:\val
 	.endm
 
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
 /*
  * Errata workaround post TTBR0_EL1 update.
  */
arch/arm64/include/asm/cpufeature.h
@@ -189,6 +189,12 @@ static inline bool system_supports_mixed_endian_el0(void)
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
@@ -54,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
arch/arm64/include/asm/thread_info.h
@@ -48,6 +48,9 @@ struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
 	struct task_struct	*task;		/* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	u64			ttbr0;		/* saved TTBR0_EL1 */
+#endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 	int			cpu;		/* cpu */
 };
arch/arm64/include/asm/uaccess.h
@@ -19,6 +19,7 @@
 #define __ASM_UACCESS_H
 
 #include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -129,16 +130,71 @@ static inline void set_fs(mm_segment_t fs)
 /*
  * User access enabling/disabling.
  */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+	unsigned long ttbr;
+
+	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
+	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+	write_sysreg(ttbr, ttbr0_el1);
+	isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
+	 * variable and the MSR. A context switch could trigger an ASID
+	 * roll-over and an update of 'ttbr0'.
+	 */
+	local_irq_save(flags);
+	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	isb();
+	local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_disable();
+	return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_enable();
+	return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+	return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	return false;
+}
+#endif
+
 #define __uaccess_disable(alt)						\
 do {									\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,			\
-			CONFIG_ARM64_PAN));				\
+	if (!uaccess_ttbr0_disable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
+				CONFIG_ARM64_PAN));			\
 } while (0)
 
 #define __uaccess_enable(alt)						\
 do {									\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,			\
-			CONFIG_ARM64_PAN));				\
+	if (!uaccess_ttbr0_enable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
+				CONFIG_ARM64_PAN));			\
 } while (0)
 
 static inline void uaccess_disable(void)
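As a reading aid for the fallback logic just above (__uaccess_disable() and
__uaccess_enable() only fall through to the SET_PSTATE_PAN alternative when
the TTBR0 switch did not run), here is a hedged sketch of which mechanism a
given system ends up using. The enum and helper are hypothetical, not kernel
API, and assume <asm/cpufeature.h>.

	/*
	 * Hypothetical helper, for illustration only: which PAN mechanism
	 * the __uaccess_{disable,enable}() macros above end up exercising.
	 */
	#include <asm/cpufeature.h>

	enum pan_mechanism {
		PAN_NONE,	/* no HW PAN, no SW_TTBR0_PAN: user mappings always reachable */
		PAN_HW_PSTATE,	/* ARMv8.1 PAN: the SET_PSTATE_PAN() alternative is applied */
		PAN_SW_TTBR0,	/* this patch: TTBR0_EL1 is swapped with reserved_ttbr0 */
	};

	static inline enum pan_mechanism effective_pan_mechanism(void)
	{
		if (system_uses_ttbr0_pan())		/* SW_TTBR0_PAN enabled and no HW PAN */
			return PAN_SW_TTBR0;
		if (cpus_have_cap(ARM64_HAS_PAN))	/* hardware PAN capability detected */
			return PAN_HW_PSTATE;
		return PAN_NONE;
	}

The next hunk, still in uaccess.h, adds the equivalent assembly-side macros.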
@@ -369,16 +425,56 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 #include <asm/assembler.h>
 
 /*
- * User access enabling/disabling macros. These are no-ops when UAO is
- * present.
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	.macro	__uaccess_ttbr0_disable, tmp1
+	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
+	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
+	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
+	isb
+	.endm
+
+	.macro	__uaccess_ttbr0_enable, tmp1
+	get_thread_info \tmp1
+	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	msr	ttbr0_el1, \tmp1			// set the non-PAN TTBR0_EL1
+	isb
+	.endm
+
+	.macro	uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+	__uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1
+	restore_irq \tmp2
+alternative_else_nop_endif
+	.endm
+#else
+	.macro	uaccess_ttbr0_disable, tmp1
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
  */
 	.macro	uaccess_disable_not_uao, tmp1
+	uaccess_ttbr0_disable \tmp1
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(1)
 alternative_else_nop_endif
 	.endm
 
 	.macro	uaccess_enable_not_uao, tmp1, tmp2
+	uaccess_ttbr0_enable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(0)
 alternative_else_nop_endif
arch/arm64/kernel/asm-offsets.c
@@ -40,6 +40,9 @@ int main(void)
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,		offsetof(struct thread_info, ttbr0));
+#endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
   BLANK();
arch/arm64/kernel/cpufeature.c
@@ -46,6 +46,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
arch/arm64/kernel/entry.S
@@ -187,10 +187,6 @@ alternative_endif
 	eret					// return to kernel
 	.endm
 
-	.macro	get_thread_info, rd
-	mrs	\rd, sp_el0
-	.endm
-
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
arch/arm64/kernel/head.S
@@ -318,14 +318,14 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	mov	x0, x25
-	add	x6, x26, #SWAPPER_DIR_SIZE
+	add	x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -404,7 +404,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	mov	x0, x25
-	add	x1, x26, #SWAPPER_DIR_SIZE
+	add	x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
 
arch/arm64/kernel/vmlinux.lds.S
@@ -194,6 +194,11 @@ SECTIONS
 	swapper_pg_dir = .;
 	. += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	reserved_ttbr0 = .;
+	. += RESERVED_TTBR0_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
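The linker-script hunk above is what makes the simple arithmetic in
__uaccess_ttbr0_disable() valid: reserved_ttbr0 is emitted immediately after
swapper_pg_dir, so its physical address is always TTBR1_EL1 plus
SWAPPER_DIR_SIZE. A rough sketch of that invariant follows; the check
function is hypothetical, the extern declarations merely mirror the
linker-script symbols, and __pa_symbol()/virt_to_phys() distinctions are
glossed over.

	/* Illustration only; not part of the patch. */
	#include <asm/kernel-pgtable.h>
	#include <asm/memory.h>
	#include <linux/bug.h>

	extern char swapper_pg_dir[], reserved_ttbr0[];

	static inline void assert_reserved_ttbr0_layout(void)
	{
		/* the zeroed reserved_ttbr0 page is placed right after swapper_pg_dir */
		WARN_ON(virt_to_phys(reserved_ttbr0) !=
			virt_to_phys(swapper_pg_dir) + SWAPPER_DIR_SIZE);
	}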