android_kernel_oneplus_msm8998/arch/x86/include/asm/uaccess_64.h
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */
/* Handles exceptions in both to and from, but doesn't do access_ok */
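/*
 * Three out-of-line implementations; copy_user_generic() below picks one
 * at runtime via the alternatives mechanism:
 *  - copy_user_enhanced_fast_string: used when the CPU has ERMS
 *  - copy_user_generic_string:       used when the CPU has REP_GOOD
 *  - copy_user_generic_unrolled:     fallback open-coded copy loop
 */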
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
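
/*
 * Copy from user space without an access_ok() check.  Small constant
 * sizes are open-coded with __get_user_asm() so the compiler can emit
 * straight-line moves; everything else goes through copy_user_generic().
 * Returns 0 on success, non-zero on fault.
 */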
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

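/*
 * __copy_from_user() adds might_fault() so sleeping-while-atomic misuse is
 * caught in debug builds; access_ok() is still the caller's responsibility
 * (that is what the leading double underscore means).  Illustrative use,
 * not part of this header:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len) ||
 *	    __copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */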
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

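/*
 * The "to user" side mirrors the layout above: constant sizes use
 * __put_user_asm(), everything else falls back to copy_user_generic().
 */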
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}

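/*
 * User-to-user copy: each constant-size case bounces the data through a
 * kernel-side temporary (tmp) with a __get_user_asm()/__put_user_asm() pair.
 */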
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}
	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}

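/*
 * Non-caching variants: __copy_user_nocache() is implemented out of line
 * with non-temporal stores so large copies do not evict useful cache
 * lines.  The plain variant may fault and sleep (might_fault()); the
 * _inatomic one must not.
 */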
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

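/*
 * Fault fixup helper used by the out-of-line copy routines: it copies the
 * remaining bytes one at a time and returns how many could not be copied.
 */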
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);
#endif /* _ASM_X86_UACCESS_64_H */