Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android

Conflicts:
    arch/arm/Kconfig

commit b56111f481
38 changed files with 744 additions and 140 deletions
@@ -423,6 +423,15 @@ config CC_STACKPROTECTOR_STRONG

endchoice

config HAVE_ARCH_WITHIN_STACK_FRAMES
    bool
    help
      An architecture should select this if it can walk the kernel stack
      frames to determine if an object is part of either the arguments
      or local variables (i.e. that it excludes saved return addresses,
      and similar) by implementing an inline arch_within_stack_frames(),
      which is used by CONFIG_HARDENED_USERCOPY.

config HAVE_CONTEXT_TRACKING
    bool
    help
@@ -36,6 +36,7 @@ config ARM
    select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
    select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
    select HAVE_ARCH_MMAP_RND_BITS if MMU
    select HAVE_ARCH_HARDENED_USERCOPY
    select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
    select HAVE_ARCH_TRACEHOOK
    select HAVE_BPF_JIT
@@ -496,7 +496,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
    unsigned int __ua_flags = uaccess_save_and_enable();
    unsigned int __ua_flags;

    check_object_size(to, n, false);
    __ua_flags = uaccess_save_and_enable();
    n = arm_copy_from_user(to, from, n);
    uaccess_restore(__ua_flags);
    return n;
@@ -511,11 +514,15 @@ static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
    unsigned int __ua_flags = uaccess_save_and_enable();
    unsigned int __ua_flags;

    check_object_size(from, n, true);
    __ua_flags = uaccess_save_and_enable();
    n = arm_copy_to_user(to, from, n);
    uaccess_restore(__ua_flags);
    return n;
#else
    check_object_size(from, n, true);
    return arm_copy_to_user(to, from, n);
#endif
}
@@ -49,6 +49,7 @@ config ARM64
    select HAVE_ALIGNED_STRUCT_PAGE if SLUB
    select HAVE_ARCH_AUDITSYSCALL
    select HAVE_ARCH_BITREVERSE
    select HAVE_ARCH_HARDENED_USERCOPY
    select HAVE_ARCH_HUGE_VMAP
    select HAVE_ARCH_JUMP_LABEL
    select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
@@ -269,24 +269,39 @@ do { \
        -EFAULT; \
})

extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
    check_object_size(to, n, false);
    return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
    check_object_size(from, n, true);
    return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
    if (access_ok(VERIFY_READ, from, n))
        n = __copy_from_user(to, from, n);
    else /* security hole - plug it */
    if (access_ok(VERIFY_READ, from, n)) {
        check_object_size(to, n, false);
        n = __arch_copy_from_user(to, from, n);
    } else /* security hole - plug it */
        memset(to, 0, n);
    return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (access_ok(VERIFY_WRITE, to, n))
        n = __copy_to_user(to, from, n);
    if (access_ok(VERIFY_WRITE, to, n)) {
        check_object_size(from, n, true);
        n = __arch_copy_to_user(to, from, n);
    }
    return n;
}
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);

/* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__copy_in_user);
@@ -66,7 +66,7 @@
    .endm

end .req x5
ENTRY(__copy_from_user)
ENTRY(__arch_copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
        CONFIG_ARM64_PAN)
    add end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
        CONFIG_ARM64_PAN)
    mov x0, #0 // Nothing to copy
    ret
ENDPROC(__copy_from_user)
ENDPROC(__arch_copy_from_user)

    .section .fixup,"ax"
    .align 2
@@ -65,7 +65,7 @@
    .endm

end .req x5
ENTRY(__copy_to_user)
ENTRY(__arch_copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
        CONFIG_ARM64_PAN)
    add end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
        CONFIG_ARM64_PAN)
    mov x0, #0
    ret
ENDPROC(__copy_to_user)
ENDPROC(__arch_copy_to_user)

    .section .fixup,"ax"
    .align 2
@@ -53,6 +53,7 @@ config IA64
    select MODULES_USE_ELF_RELA
    select ARCH_USE_CMPXCHG_LOCKREF
    select HAVE_ARCH_AUDITSYSCALL
    select HAVE_ARCH_HARDENED_USERCOPY
    default y
    help
      The Itanium Processor Family is Intel's 64-bit successor to
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
    if (!__builtin_constant_p(count))
        check_object_size(from, count, true);

    return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
    if (!__builtin_constant_p(count))
        check_object_size(to, count, false);

    return __copy_user((__force void __user *) to, from, count);
}

@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
    const void *__cu_from = (from); \
    long __cu_len = (n); \
    \
    if (__access_ok(__cu_to, __cu_len, get_fs())) \
        __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
    if (__access_ok(__cu_to, __cu_len, get_fs())) { \
        if (!__builtin_constant_p(n)) \
            check_object_size(__cu_from, __cu_len, true); \
        __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
    } \
    __cu_len; \
})

@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
    long __cu_len = (n); \
    \
    __chk_user_ptr(__cu_from); \
    if (__access_ok(__cu_from, __cu_len, get_fs())) \
    if (__access_ok(__cu_from, __cu_len, get_fs())) { \
        if (!__builtin_constant_p(n)) \
            check_object_size(__cu_to, __cu_len, false); \
        __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
    } \
    __cu_len; \
})
@@ -160,6 +160,7 @@ config PPC
    select EDAC_ATOMIC_SCRUB
    select ARCH_HAS_DMA_SET_COHERENT_MASK
    select HAVE_ARCH_SECCOMP_FILTER
    select HAVE_ARCH_HARDENED_USERCOPY

config GENERIC_CSUM
    def_bool CPU_LITTLE_ENDIAN
@@ -325,10 +325,15 @@ static inline unsigned long copy_from_user(void *to,
{
    unsigned long over;

    if (access_ok(VERIFY_READ, from, n))
    if (access_ok(VERIFY_READ, from, n)) {
        if (!__builtin_constant_p(n))
            check_object_size(to, n, false);
        return __copy_tofrom_user((__force void __user *)to, from, n);
    }
    if ((unsigned long)from < TASK_SIZE) {
        over = (unsigned long)from + n - TASK_SIZE;
        if (!__builtin_constant_p(n - over))
            check_object_size(to, n - over, false);
        return __copy_tofrom_user((__force void __user *)to, from,
                n - over) + over;
    }
@@ -340,10 +345,15 @@ static inline unsigned long copy_to_user(void __user *to,
{
    unsigned long over;

    if (access_ok(VERIFY_WRITE, to, n))
    if (access_ok(VERIFY_WRITE, to, n)) {
        if (!__builtin_constant_p(n))
            check_object_size(from, n, true);
        return __copy_tofrom_user(to, (__force void __user *)from, n);
    }
    if ((unsigned long)to < TASK_SIZE) {
        over = (unsigned long)to + n - TASK_SIZE;
        if (!__builtin_constant_p(n))
            check_object_size(from, n - over, true);
        return __copy_tofrom_user(to, (__force void __user *)from,
                n - over) + over;
    }
@@ -387,6 +397,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
        if (ret == 0)
            return 0;
    }

    if (!__builtin_constant_p(n))
        check_object_size(to, n, false);

    return __copy_tofrom_user((__force void __user *)to, from, n);
}
@@ -413,6 +427,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
        if (ret == 0)
            return 0;
    }
    if (!__builtin_constant_p(n))
        check_object_size(from, n, true);

    return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
@@ -117,6 +117,7 @@ config S390
    select HAVE_ALIGNED_STRUCT_PAGE if SLUB
    select HAVE_ARCH_AUDITSYSCALL
    select HAVE_ARCH_EARLY_PFN_TO_NID
    select HAVE_ARCH_HARDENED_USERCOPY
    select HAVE_ARCH_JUMP_LABEL
    select HAVE_ARCH_SECCOMP_FILTER
    select HAVE_ARCH_SOFT_DIRTY
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,

unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
    check_object_size(to, n, false);
    if (static_branch_likely(&have_mvcos))
        return copy_from_user_mvcos(to, from, n);
    return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,

unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
    check_object_size(from, n, true);
    if (static_branch_likely(&have_mvcos))
        return copy_to_user_mvcos(to, from, n);
    return copy_to_user_mvcs(to, from, n);
@@ -43,6 +43,7 @@ config SPARC
    select ODD_RT_SIGACTION
    select OLD_SIGSUSPEND
    select ARCH_HAS_SG_CHAIN
    select HAVE_ARCH_HARDENED_USERCOPY

config SPARC32
    def_bool !64BIT
@@ -313,22 +313,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon

static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (n && __access_ok((unsigned long) to, n))
    if (n && __access_ok((unsigned long) to, n)) {
        if (!__builtin_constant_p(n))
            check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
    else
    } else
        return n;
}

static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (!__builtin_constant_p(n))
        check_object_size(from, n, true);
    return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
    if (n && __access_ok((unsigned long) from, n))
    if (n && __access_ok((unsigned long) from, n)) {
        if (!__builtin_constant_p(n))
            check_object_size(to, n, false);
        return __copy_user((__force void __user *) to, from, n);
    else
    } else
        return n;
}
@@ -250,8 +250,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
    unsigned long ret = ___copy_from_user(to, from, size);
    unsigned long ret;

    if (!__builtin_constant_p(size))
        check_object_size(to, size, false);

    ret = ___copy_from_user(to, from, size);
    if (unlikely(ret))
        ret = copy_from_user_fixup(to, from, size);
@@ -267,8 +271,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
    unsigned long ret = ___copy_to_user(to, from, size);
    unsigned long ret;

    if (!__builtin_constant_p(size))
        check_object_size(from, size, true);
    ret = ___copy_to_user(to, from, size);
    if (unlikely(ret))
        ret = copy_to_user_fixup(to, from, size);
    return ret;
@@ -79,6 +79,7 @@ config X86
    select HAVE_ALIGNED_STRUCT_PAGE if SLUB
    select HAVE_AOUT if X86_32
    select HAVE_ARCH_AUDITSYSCALL
    select HAVE_ARCH_HARDENED_USERCOPY
    select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
    select HAVE_ARCH_JUMP_LABEL
    select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -88,7 +89,7 @@ config X86
    select HAVE_ARCH_SOFT_DIRTY if X86_64
    select HAVE_ARCH_TRACEHOOK
    select HAVE_ARCH_TRANSPARENT_HUGEPAGE
    select HAVE_BPF_JIT if X86_64
    select HAVE_ARCH_WITHIN_STACK_FRAMES
    select HAVE_CC_STACKPROTECTOR
    select HAVE_CMPXCHG_DOUBLE
    select HAVE_CMPXCHG_LOCAL
@@ -177,6 +177,50 @@ static inline unsigned long current_stack_pointer(void)
    return sp;
}

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *  1 if within a frame
 * -1 if placed across a frame boundary (or outside stack)
 *  0 unable to determine (no frame pointers, etc)
 */
static inline int arch_within_stack_frames(const void * const stack,
                const void * const stackend,
                const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
    const void *frame = NULL;
    const void *oldframe;

    oldframe = __builtin_frame_address(1);
    if (oldframe)
        frame = __builtin_frame_address(2);
    /*
     * low ----------------------------------------------> high
     * [saved bp][saved ip][args][local vars][saved bp][saved ip]
     *                     ^----------------^
     *               allow copies only within here
     */
    while (stack <= frame && frame < stackend) {
        /*
         * If obj + len extends past the last frame, this
         * check won't pass and the next frame will be 0,
         * causing us to bail out and correctly report
         * the copy as invalid.
         */
        if (obj + len <= frame)
            return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
        oldframe = frame;
        frame = *(const void * const *)frame;
    }
    return -1;
#else
    return 0;
#endif
}

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_64
@@ -134,6 +134,9 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end() clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
@@ -193,10 +196,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
    asm volatile(ASM_STAC "\n" \
    asm volatile("\n" \
        "1: movl %%eax,0(%2)\n" \
        "2: movl %%edx,4(%2)\n" \
        "3: " ASM_CLAC "\n" \
        "3:" \
        ".section .fixup,\"ax\"\n" \
        "4: movl %3,%0\n" \
        " jmp 3b\n" \
@@ -207,10 +210,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
        : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr) \
    asm volatile(ASM_STAC "\n" \
    asm volatile("\n" \
        "1: movl %%eax,0(%1)\n" \
        "2: movl %%edx,4(%1)\n" \
        "3: " ASM_CLAC "\n" \
        "3:" \
        _ASM_EXTABLE_EX(1b, 2b) \
        _ASM_EXTABLE_EX(2b, 3b) \
        : : "A" (x), "r" (addr))
@@ -304,6 +307,10 @@ do { \
    } \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size) \
do { \
    __chk_user_ptr(ptr); \
@@ -358,9 +365,9 @@ do { \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
    asm volatile(ASM_STAC "\n" \
    asm volatile("\n" \
        "1: mov"itype" %2,%"rtype"1\n" \
        "2: " ASM_CLAC "\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: mov %3,%0\n" \
        " xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -370,6 +377,10 @@ do { \
        : "=r" (err), ltype(x) \
        : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size) \
do { \
    __chk_user_ptr(ptr); \
@@ -400,7 +411,9 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
    int __pu_err; \
    __uaccess_begin(); \
    __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
    __uaccess_end(); \
    __builtin_expect(__pu_err, 0); \
})

@@ -408,7 +421,9 @@ do { \
({ \
    int __gu_err; \
    unsigned long __gu_val; \
    __uaccess_begin(); \
    __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
    __uaccess_end(); \
    (x) = (__force __typeof__(*(ptr)))__gu_val; \
    __builtin_expect(__gu_err, 0); \
})
@@ -423,9 +438,9 @@ struct __large_struct { unsigned long buf[100]; };
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
    asm volatile(ASM_STAC "\n" \
    asm volatile("\n" \
        "1: mov"itype" %"rtype"1,%2\n" \
        "2: " ASM_CLAC "\n" \
        "2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: mov %3,%0\n" \
        " jmp 2b\n" \
@@ -445,11 +460,11 @@ struct __large_struct { unsigned long buf[100]; };
 */
#define uaccess_try do { \
    current_thread_info()->uaccess_err = 0; \
    stac(); \
    __uaccess_begin(); \
    barrier();

#define uaccess_catch(err) \
    clac(); \
    __uaccess_end(); \
    (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
@@ -547,12 +562,13 @@ extern void __cmpxchg_wrong_size(void)
    __typeof__(ptr) __uval = (uval); \
    __typeof__(*(ptr)) __old = (old); \
    __typeof__(*(ptr)) __new = (new); \
    __uaccess_begin(); \
    switch (size) { \
    case 1: \
    { \
        asm volatile("\t" ASM_STAC "\n" \
        asm volatile("\n" \
            "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
            "2:\t" ASM_CLAC "\n" \
            "2:\n" \
            "\t.section .fixup, \"ax\"\n" \
            "3:\tmov %3, %0\n" \
            "\tjmp 2b\n" \
@@ -566,9 +582,9 @@ extern void __cmpxchg_wrong_size(void)
    } \
    case 2: \
    { \
        asm volatile("\t" ASM_STAC "\n" \
        asm volatile("\n" \
            "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
            "2:\t" ASM_CLAC "\n" \
            "2:\n" \
            "\t.section .fixup, \"ax\"\n" \
            "3:\tmov %3, %0\n" \
            "\tjmp 2b\n" \
@@ -582,9 +598,9 @@ extern void __cmpxchg_wrong_size(void)
    } \
    case 4: \
    { \
        asm volatile("\t" ASM_STAC "\n" \
        asm volatile("\n" \
            "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
            "2:\t" ASM_CLAC "\n" \
            "2:\n" \
            "\t.section .fixup, \"ax\"\n" \
            "3:\tmov %3, %0\n" \
            "\tjmp 2b\n" \
@@ -601,9 +617,9 @@ extern void __cmpxchg_wrong_size(void)
        if (!IS_ENABLED(CONFIG_X86_64)) \
            __cmpxchg_wrong_size(); \
        \
        asm volatile("\t" ASM_STAC "\n" \
        asm volatile("\n" \
            "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
            "2:\t" ASM_CLAC "\n" \
            "2:\n" \
            "\t.section .fixup, \"ax\"\n" \
            "3:\tmov %3, %0\n" \
            "\tjmp 2b\n" \
@@ -618,6 +634,7 @@ extern void __cmpxchg_wrong_size(void)
    default: \
        __cmpxchg_wrong_size(); \
    } \
    __uaccess_end(); \
    *__uval = __old; \
    __ret; \
})
@@ -714,9 +731,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
     * case, and do only runtime checking for non-constant sizes.
     */

    if (likely(sz < 0 || sz >= n))
    if (likely(sz < 0 || sz >= n)) {
        check_object_size(to, n, false);
        n = _copy_from_user(to, from, n);
    else if(__builtin_constant_p(n))
    } else if (__builtin_constant_p(n))
        copy_from_user_overflow();
    else
        __copy_from_user_overflow(sz, n);
@@ -732,9 +750,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
    might_fault();

    /* See the comment in copy_from_user() above. */
    if (likely(sz < 0 || sz >= n))
    if (likely(sz < 0 || sz >= n)) {
        check_object_size(from, n, true);
        n = _copy_to_user(to, from, n);
    else if(__builtin_constant_p(n))
    } else if (__builtin_constant_p(n))
        copy_to_user_overflow();
    else
        __copy_to_user_overflow(sz, n);
@@ -745,5 +764,30 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()

#define unsafe_put_user(x, ptr) \
({ \
    int __pu_err; \
    __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
    __builtin_expect(__pu_err, 0); \
})

#define unsafe_get_user(x, ptr) \
({ \
    int __gu_err; \
    unsigned long __gu_val; \
    __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
    (x) = (__force __typeof__(*(ptr)))__gu_val; \
    __builtin_expect(__gu_err, 0); \
})

#endif /* _ASM_X86_UACCESS_H */
@@ -33,38 +33,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure he pins the user space address
 * so that we don't result in page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
    if (__builtin_constant_p(n)) {
        unsigned long ret;

        switch (n) {
        case 1:
            __put_user_size(*(u8 *)from, (u8 __user *)to,
                    1, ret, 1);
            return ret;
        case 2:
            __put_user_size(*(u16 *)from, (u16 __user *)to,
                    2, ret, 2);
            return ret;
        case 4:
            __put_user_size(*(u32 *)from, (u32 __user *)to,
                    4, ret, 4);
            return ret;
        case 8:
            __put_user_size(*(u64 *)from, (u64 __user *)to,
                    8, ret, 8);
            return ret;
        }
    }
    check_object_size(from, n, true);
    return __copy_to_user_ll(to, from, n);
}

@@ -93,26 +66,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
    /* Avoid zeroing the tail if the copy fails..
     * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
     * but as the zeroing behaviour is only significant when n is not
     * constant, that shouldn't be a problem.
     */
    if (__builtin_constant_p(n)) {
        unsigned long ret;

        switch (n) {
        case 1:
            __get_user_size(*(u8 *)to, from, 1, ret, 1);
            return ret;
        case 2:
            __get_user_size(*(u16 *)to, from, 2, ret, 2);
            return ret;
        case 4:
            __get_user_size(*(u32 *)to, from, 4, ret, 4);
            return ret;
        }
    }
    return __copy_from_user_ll_nozero(to, from, n);
}
@@ -143,18 +96,25 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
    might_fault();
    check_object_size(to, n, false);
    if (__builtin_constant_p(n)) {
        unsigned long ret;

        switch (n) {
        case 1:
            __uaccess_begin();
            __get_user_size(*(u8 *)to, from, 1, ret, 1);
            __uaccess_end();
            return ret;
        case 2:
            __uaccess_begin();
            __get_user_size(*(u16 *)to, from, 2, ret, 2);
            __uaccess_end();
            return ret;
        case 4:
            __uaccess_begin();
            __get_user_size(*(u32 *)to, from, 4, ret, 4);
            __uaccess_end();
            return ret;
        }
    }
@@ -170,13 +130,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,

    switch (n) {
    case 1:
        __uaccess_begin();
        __get_user_size(*(u8 *)to, from, 1, ret, 1);
        __uaccess_end();
        return ret;
    case 2:
        __uaccess_begin();
        __get_user_size(*(u16 *)to, from, 2, ret, 2);
        __uaccess_end();
        return ret;
    case 4:
        __uaccess_begin();
        __get_user_size(*(u32 *)to, from, 4, ret, 4);
        __uaccess_end();
        return ret;
    }
}
@@ -53,38 +53,53 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
    int ret = 0;

    check_object_size(dst, size, false);
    if (!__builtin_constant_p(size))
        return copy_user_generic(dst, (__force void *)src, size);
    switch (size) {
    case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
    case 1:
        __uaccess_begin();
        __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                ret, "b", "b", "=q", 1);
        __uaccess_end();
        return ret;
    case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
    case 2:
        __uaccess_begin();
        __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                ret, "w", "w", "=r", 2);
        __uaccess_end();
        return ret;
    case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
    case 4:
        __uaccess_begin();
        __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                ret, "l", "k", "=r", 4);
        __uaccess_end();
        return ret;
    case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
    case 8:
        __uaccess_begin();
        __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                ret, "q", "", "=r", 8);
        __uaccess_end();
        return ret;
    case 10:
        __uaccess_begin();
        __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                ret, "q", "", "=r", 10);
        if (unlikely(ret))
            return ret;
        __get_user_asm(*(u16 *)(8 + (char *)dst),
                (u16 __user *)(8 + (char __user *)src),
                ret, "w", "w", "=r", 2);
        if (likely(!ret))
            __get_user_asm(*(u16 *)(8 + (char *)dst),
                    (u16 __user *)(8 + (char __user *)src),
                    ret, "w", "w", "=r", 2);
        __uaccess_end();
        return ret;
    case 16:
        __uaccess_begin();
        __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                ret, "q", "", "=r", 16);
        if (unlikely(ret))
            return ret;
        __get_user_asm(*(u64 *)(8 + (char *)dst),
                (u64 __user *)(8 + (char __user *)src),
                ret, "q", "", "=r", 8);
        if (likely(!ret))
            __get_user_asm(*(u64 *)(8 + (char *)dst),
                    (u64 __user *)(8 + (char __user *)src),
                    ret, "q", "", "=r", 8);
        __uaccess_end();
        return ret;
    default:
        return copy_user_generic(dst, (__force void *)src, size);
@@ -103,38 +118,55 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
    int ret = 0;

    check_object_size(src, size, true);
    if (!__builtin_constant_p(size))
        return copy_user_generic((__force void *)dst, src, size);
    switch (size) {
    case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
    case 1:
        __uaccess_begin();
        __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                ret, "b", "b", "iq", 1);
        __uaccess_end();
        return ret;
    case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
    case 2:
        __uaccess_begin();
        __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                ret, "w", "w", "ir", 2);
        __uaccess_end();
        return ret;
    case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
    case 4:
        __uaccess_begin();
        __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                ret, "l", "k", "ir", 4);
        __uaccess_end();
        return ret;
    case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
    case 8:
        __uaccess_begin();
        __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                ret, "q", "", "er", 8);
        __uaccess_end();
        return ret;
    case 10:
        __uaccess_begin();
        __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                ret, "q", "", "er", 10);
        if (unlikely(ret))
            return ret;
        asm("":::"memory");
        __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                ret, "w", "w", "ir", 2);
        if (likely(!ret)) {
            asm("":::"memory");
            __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                    ret, "w", "w", "ir", 2);
        }
        __uaccess_end();
        return ret;
    case 16:
        __uaccess_begin();
        __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                ret, "q", "", "er", 16);
        if (unlikely(ret))
            return ret;
        asm("":::"memory");
        __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                ret, "q", "", "er", 8);
        if (likely(!ret)) {
            asm("":::"memory");
            __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                    ret, "q", "", "er", 8);
        }
        __uaccess_end();
        return ret;
    default:
        return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +192,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
    switch (size) {
    case 1: {
        u8 tmp;
        __uaccess_begin();
        __get_user_asm(tmp, (u8 __user *)src,
                ret, "b", "b", "=q", 1);
        if (likely(!ret))
            __put_user_asm(tmp, (u8 __user *)dst,
                    ret, "b", "b", "iq", 1);
        __uaccess_end();
        return ret;
    }
    case 2: {
        u16 tmp;
        __uaccess_begin();
        __get_user_asm(tmp, (u16 __user *)src,
                ret, "w", "w", "=r", 2);
        if (likely(!ret))
            __put_user_asm(tmp, (u16 __user *)dst,
                    ret, "w", "w", "ir", 2);
        __uaccess_end();
        return ret;
    }

    case 4: {
        u32 tmp;
        __uaccess_begin();
        __get_user_asm(tmp, (u32 __user *)src,
                ret, "l", "k", "=r", 4);
        if (likely(!ret))
            __put_user_asm(tmp, (u32 __user *)dst,
                    ret, "l", "k", "ir", 4);
        __uaccess_end();
        return ret;
    }
    case 8: {
        u64 tmp;
        __uaccess_begin();
        __get_user_asm(tmp, (u64 __user *)src,
                ret, "q", "", "=r", 8);
        if (likely(!ret))
            __put_user_asm(tmp, (u64 __user *)dst,
                    ret, "q", "", "er", 8);
        __uaccess_end();
        return ret;
    }
    default:
@@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
            return ret;

        if (r->presumed_offset != offset &&
            __copy_to_user_inatomic(&user_relocs->presumed_offset,
                        &r->presumed_offset,
                        sizeof(r->presumed_offset))) {
            __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
            return -EFAULT;
        }
@@ -65,8 +65,10 @@ enum {

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

#define for_each_migratetype_order(order, type) \
@@ -144,6 +144,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
const char *__check_heap_object(const void *ptr, unsigned long n,
                struct page *page);
#else
static inline const char *__check_heap_object(const void *ptr,
                unsigned long n,
                struct page *page)
{
    return NULL;
}
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
@@ -145,6 +145,30 @@ static inline bool test_and_clear_restore_sigmask(void)
#error "no set_restore_sigmask() provided and default one won't work"
#endif

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
                const void * const stackend,
                const void *obj, unsigned long len)
{
    return 0;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
                bool to_user);

static inline void check_object_size(const void *ptr, unsigned long n,
                bool to_user)
{
    __check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
                bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

#endif /* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */
@@ -111,4 +111,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define probe_kernel_address(addr, retval) \
    probe_kernel_read(&retval, addr, sizeof(retval))

#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr) __get_user(x, ptr)
#define unsafe_put_user(x, ptr) __put_user(x, ptr)
#endif

#endif /* __LINUX_UACCESS_H__ */
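For orientation, a minimal sketch (not part of this merge; the function and variable names are hypothetical) of the calling pattern these fallback helpers and their x86 counterparts expect: the caller does access_ok() first, then brackets the unsafe accessors with user_access_begin()/user_access_end():

    static long read_user_word(const unsigned long __user *uptr, unsigned long *val)
    {
        /* Caller must validate the range before using the "unsafe" accessors. */
        if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
            return -EFAULT;
        user_access_begin();
        if (unlikely(unsafe_get_user(*val, uptr))) {
            user_access_end();
            return -EFAULT;
        }
        user_access_end();
        return 0;
    }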
@@ -1719,6 +1719,7 @@ choice

config SLAB
    bool "SLAB"
    select HAVE_HARDENED_USERCOPY_ALLOCATOR
    help
      The regular slab allocator that is established and known to work
      well in all environments. It organizes cache hot objects in
@@ -1726,6 +1727,7 @@ config SLAB

config SLUB
    bool "SLUB (Unqueued Allocator)"
    select HAVE_HARDENED_USERCOPY_ALLOCATOR
    help
      SLUB is a slab allocator that minimizes cache line usage
      instead of managing queues of cached objects (SLAB approach).
@@ -1692,8 +1692,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
    int result;

    pagefault_disable();
    result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
            sizeof(opcode));
    result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
    pagefault_enable();

    if (likely(result == 0))
@@ -681,7 +681,7 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
    int ret;

    pagefault_disable();
    ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
    ret = __get_user(*dest, from);
    pagefault_enable();

    return ret ? -EFAULT : 0;
@@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
        unsigned long c, data;

        /* Fall back to byte-at-a-time if we get a page fault */
        if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
        if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
            break;
        *(unsigned long *)(dst+res) = c;
        if (has_zero(c, &data, &constants)) {
@@ -55,7 +55,7 @@ byte_at_a_time:
    while (max) {
        char c;

        if (unlikely(__get_user(c,src+res)))
        if (unlikely(unsafe_get_user(c,src+res)))
            return -EFAULT;
        dst[res] = c;
        if (!c)
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
    src_addr = (unsigned long)src;
    if (likely(src_addr < max_addr)) {
        unsigned long max = max_addr - src_addr;
        return do_strncpy_from_user(dst, src, count, max);
        long retval;

        user_access_begin();
        retval = do_strncpy_from_user(dst, src, count, max);
        user_access_end();
        return retval;
    }
    return -EFAULT;
}
@@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
    src -= align;
    max += align;

    if (unlikely(__get_user(c,(unsigned long __user *)src)))
    if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
        return 0;
    c |= aligned_byte_mask(align);

@@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        if (unlikely(max <= sizeof(unsigned long)))
            break;
        max -= sizeof(unsigned long);
        if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
        if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
            return 0;
    }
    res -= align;
@@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count)
    src_addr = (unsigned long)str;
    if (likely(src_addr < max_addr)) {
        unsigned long max = max_addr - src_addr;
        return do_strnlen_user(str, count, max);
        long retval;

        user_access_begin();
        retval = do_strnlen_user(str, count, max);
        user_access_end();
        return retval;
    }
    return 0;
}
@@ -141,7 +146,12 @@ long strlen_user(const char __user *str)
    src_addr = (unsigned long)str;
    if (likely(src_addr < max_addr)) {
        unsigned long max = max_addr - src_addr;
        return do_strnlen_user(str, ~0ul, max);
        long retval;

        user_access_begin();
        retval = do_strnlen_user(str, ~0ul, max);
        user_access_end();
        return retval;
    }
    return 0;
}
@@ -5,6 +5,9 @@
KASAN_SANITIZE_slab_common.o := n
KASAN_SANITIZE_slub.o := n

# Since __builtin_frame_address does work as used, disable the warning.
CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)

mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
            mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -81,3 +84,4 @@ obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
@@ -96,8 +96,7 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
    pagefault_disable();

    do {
        ret = __copy_from_user_inatomic(dst++,
                (const void __user __force *)src++, 1);
        ret = __get_user(*dst++, (const char __user __force *)src++);
    } while (dst[-1] && ret == 0 && src - unsafe_addr < count);

    dst[-1] = '\0';
mm/slab.c | 30

@@ -4228,6 +4228,36 @@ static int __init slab_proc_init(void)
module_init(slab_proc_init);
#endif

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects objects that are incorrectly sized.
 *
 * Returns NULL if check passes, otherwise const char * to name of cache
 * to indicate an error.
 */
const char *__check_heap_object(const void *ptr, unsigned long n,
                struct page *page)
{
    struct kmem_cache *cachep;
    unsigned int objnr;
    unsigned long offset;

    /* Find and validate object. */
    cachep = page->slab_cache;
    objnr = obj_to_index(cachep, page, (void *)ptr);
    BUG_ON(objnr >= cachep->num);

    /* Find offset within object. */
    offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);

    /* Allow address range falling entirely within object size. */
    if (offset <= cachep->object_size && n <= cachep->object_size - offset)
        return NULL;

    return cachep->name;
}
#endif /* CONFIG_HARDENED_USERCOPY */

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
mm/slub.c | 40

@@ -3585,6 +3585,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif

#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects objects that are incorrectly sized.
 *
 * Returns NULL if check passes, otherwise const char * to name of cache
 * to indicate an error.
 */
const char *__check_heap_object(const void *ptr, unsigned long n,
                struct page *page)
{
    struct kmem_cache *s;
    unsigned long offset;
    size_t object_size;

    /* Find object and usable object size. */
    s = page->slab_cache;
    object_size = slab_ksize(s);

    /* Reject impossible pointers. */
    if (ptr < page_address(page))
        return s->name;

    /* Find offset within object. */
    offset = (ptr - page_address(page)) % s->size;

    /* Adjust for redzone and reject if within the redzone. */
    if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
        if (offset < s->red_left_pad)
            return s->name;
        offset -= s->red_left_pad;
    }

    /* Allow address range falling entirely within object size. */
    if (offset <= object_size && n <= object_size - offset)
        return NULL;

    return s->name;
}
#endif /* CONFIG_HARDENED_USERCOPY */

static size_t __ksize(const void *object)
{
    struct page *page;
mm/usercopy.c | 268 (new file)

@@ -0,0 +1,268 @@
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/sections.h>

enum {
    BAD_STACK = -1,
    NOT_STACK = 0,
    GOOD_FRAME,
    GOOD_STACK,
};

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *  NOT_STACK: not at all on the stack
 *  GOOD_FRAME: fully within a valid stack frame
 *  GOOD_STACK: fully on the stack (when can't do frame-checking)
 *  BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
    const void * const stack = task_stack_page(current);
    const void * const stackend = stack + THREAD_SIZE;
    int ret;

    /* Object is not on the stack at all. */
    if (obj + len <= stack || stackend <= obj)
        return NOT_STACK;

    /*
     * Reject: object partially overlaps the stack (passing the
     * check above means at least one end is within the stack,
     * so if this check fails, the other end is outside the stack).
     */
    if (obj < stack || stackend < obj + len)
        return BAD_STACK;

    /* Check if object is safely within a valid frame. */
    ret = arch_within_stack_frames(stack, stackend, obj, len);
    if (ret)
        return ret;

    return GOOD_STACK;
}

static void report_usercopy(const void *ptr, unsigned long len,
                bool to_user, const char *type)
{
    pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
        to_user ? "exposure" : "overwrite",
        to_user ? "from" : "to", ptr, type ? : "unknown", len);
    /*
     * For greater effect, it would be nice to do do_group_exit(),
     * but BUG() actually hooks all the lock-breaking and per-arch
     * Oops code, so that is used here instead.
     */
    BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
            unsigned long high)
{
    unsigned long check_low = (uintptr_t)ptr;
    unsigned long check_high = check_low + n;

    /* Does not overlap if entirely above or entirely below. */
    if (check_low >= high || check_high < low)
        return false;

    return true;
}
/* Is this address range in the kernel text area? */
static inline const char *check_kernel_text_object(const void *ptr,
                unsigned long n)
{
    unsigned long textlow = (unsigned long)_stext;
    unsigned long texthigh = (unsigned long)_etext;
    unsigned long textlow_linear, texthigh_linear;

    if (overlaps(ptr, n, textlow, texthigh))
        return "<kernel text>";

    /*
     * Some architectures have virtual memory mappings with a secondary
     * mapping of the kernel text, i.e. there is more than one virtual
     * kernel address that points to the kernel image. It is usually
     * when there is a separate linear physical memory mapping, in that
     * __pa() is not just the reverse of __va(). This can be detected
     * and checked:
     */
    textlow_linear = (unsigned long)__va(__pa(textlow));
    /* No different mapping: we're done. */
    if (textlow_linear == textlow)
        return NULL;

    /* Check the secondary mapping... */
    texthigh_linear = (unsigned long)__va(__pa(texthigh));
    if (overlaps(ptr, n, textlow_linear, texthigh_linear))
        return "<linear kernel text>";

    return NULL;
}

static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
    /* Reject if object wraps past end of memory. */
    if (ptr + n < ptr)
        return "<wrapped address>";

    /* Reject if NULL or ZERO-allocation. */
    if (ZERO_OR_NULL_PTR(ptr))
        return "<null>";

    return NULL;
}

static inline const char *check_heap_object(const void *ptr, unsigned long n,
                bool to_user)
{
    struct page *page, *endpage;
    const void *end = ptr + n - 1;
    bool is_reserved, is_cma;

    /*
     * Some architectures (arm64) return true for virt_addr_valid() on
     * vmalloced addresses. Work around this by checking for vmalloc
     * first.
     */
    if (is_vmalloc_addr(ptr))
        return NULL;

    if (!virt_addr_valid(ptr))
        return NULL;

    page = virt_to_head_page(ptr);

    /* Check slab allocator for flags and size. */
    if (PageSlab(page))
        return __check_heap_object(ptr, n, page);

    /*
     * Sometimes the kernel data regions are not marked Reserved (see
     * check below). And sometimes [_sdata,_edata) does not cover
     * rodata and/or bss, so check each range explicitly.
     */

    /* Allow reads of kernel rodata region (if not marked as Reserved). */
    if (ptr >= (const void *)__start_rodata &&
        end <= (const void *)__end_rodata) {
        if (!to_user)
            return "<rodata>";
        return NULL;
    }

    /* Allow kernel data region (if not marked as Reserved). */
    if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
        return NULL;

    /* Allow kernel bss region (if not marked as Reserved). */
    if (ptr >= (const void *)__bss_start &&
        end <= (const void *)__bss_stop)
        return NULL;

    /* Is the object wholly within one base page? */
    if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
           ((unsigned long)end & (unsigned long)PAGE_MASK)))
        return NULL;

    /* Allow if start and end are inside the same compound page. */
    endpage = virt_to_head_page(end);
    if (likely(endpage == page))
        return NULL;

    /*
     * Reject if range is entirely either Reserved (i.e. special or
     * device memory), or CMA. Otherwise, reject since the object spans
     * several independently allocated pages.
     */
    is_reserved = PageReserved(page);
    is_cma = is_migrate_cma_page(page);
    if (!is_reserved && !is_cma)
        goto reject;

    for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
        page = virt_to_head_page(ptr);
        if (is_reserved && !PageReserved(page))
            goto reject;
        if (is_cma && !is_migrate_cma_page(page))
            goto reject;
    }

    return NULL;

reject:
    return "<spans multiple pages>";
}
/*
 * Validates that the given object is:
 * - not bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
    const char *err;

    /* Skip all tests if size is zero. */
    if (!n)
        return;

    /* Check for invalid addresses. */
    err = check_bogus_address(ptr, n);
    if (err)
        goto report;

    /* Check for bad heap object. */
    err = check_heap_object(ptr, n, to_user);
    if (err)
        goto report;

    /* Check for bad stack object. */
    switch (check_stack_object(ptr, n)) {
    case NOT_STACK:
        /* Object is not touching the current process stack. */
        break;
    case GOOD_FRAME:
    case GOOD_STACK:
        /*
         * Object is either in the correct frame (when it
         * is possible to check) or just generally on the
         * process stack (when frame checking not available).
         */
        return;
    default:
        err = "<process stack>";
        goto report;
    }

    /* Check for object in kernel to avoid text exposure. */
    err = check_kernel_text_object(ptr, n);
    if (!err)
        return;

report:
    report_usercopy(ptr, n, to_user, err);
}
EXPORT_SYMBOL(__check_object_size);
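As a reading aid, a minimal sketch (illustrative only; the struct, function, and parameter names are hypothetical and not part of this merge) of how a copy into a slab object reaches these checks: with CONFIG_HARDENED_USERCOPY enabled, copy_from_user() calls check_object_size(), which lands in __check_object_size() above, and an oversized copy is rejected via __check_heap_object() instead of overflowing the neighbouring slab object:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct demo_req {           /* hypothetical kmalloc()-backed object */
        char name[16];
        int flags;
    };

    static long demo_set_name(struct demo_req *req,
                  const void __user *ubuf, size_t len)
    {
        /* If len runs past the slab object backing 'req', the hardened
         * check BUGs and reports the cache name rather than corrupting
         * the heap. */
        if (copy_from_user(req->name, ubuf, len))
            return -EFAULT;
        return 0;
    }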
@@ -127,6 +127,34 @@ config LSM_MMAP_MIN_ADDR
      this low address space will need the permission specific to the
      systems running LSM.

config HAVE_HARDENED_USERCOPY_ALLOCATOR
    bool
    help
      The heap allocator implements __check_heap_object() for
      validating memory ranges against heap object sizes in
      support of CONFIG_HARDENED_USERCOPY.

config HAVE_ARCH_HARDENED_USERCOPY
    bool
    help
      The architecture supports CONFIG_HARDENED_USERCOPY by
      calling check_object_size() just before performing the
      userspace copies in the low level implementation of
      copy_to_user() and copy_from_user().

config HARDENED_USERCOPY
    bool "Harden memory copies between kernel and userspace"
    depends on HAVE_ARCH_HARDENED_USERCOPY
    select BUG
    help
      This option checks for obviously wrong memory regions when
      copying memory to/from the kernel (via copy_to_user() and
      copy_from_user() functions) by rejecting memory ranges that
      are larger than the specified heap object, span multiple
      separately allocated pages, are not on the process stack,
      or are part of the kernel text. This kills entire classes
      of heap overflow exploits and similar kernel memory exposures.

source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
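To make the help text concrete, a minimal sketch (illustrative only; not taken from the kernel tree) of the exposure pattern this option catches at runtime, where a user-controlled length is copied out of a fixed-size heap object:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static long demo_leaky_read(char __user *ubuf, size_t user_len)
    {
        char *buf = kmalloc(64, GFP_KERNEL);
        long ret = 0;

        if (!buf)
            return -ENOMEM;
        /* With CONFIG_HARDENED_USERCOPY=y, a user_len larger than the slab
         * object is rejected by __check_object_size() instead of exposing
         * adjacent heap memory. */
        if (copy_to_user(ubuf, buf, user_len))
            ret = -EFAULT;
        kfree(buf);
        return ret;
    }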