Merge remote-tracking branch 'common/android-4.4' into android-4.4.y

Dmitry Shmidt 2017-01-09 10:12:53 -08:00
commit cf0533acda
22 changed files with 157 additions and 123 deletions

View file

@@ -8,6 +8,7 @@
# CONFIG_VT is not set
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ARM_KERNMEM_PERMS=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y

View file

@@ -716,7 +716,7 @@ config SETEND_EMULATION
endif
config ARM64_SW_TTBR0_PAN
bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved

View file

@@ -2,6 +2,7 @@
#define __ASM_ALTERNATIVE_H
#include <asm/cpufeature.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
@@ -90,24 +91,15 @@ void apply_alternatives(void *start, size_t length);
.endm
/*
* Begin an alternative code sequence.
* Alternative sequences
*
* The code that follows this macro will be assembled and linked as
* normal. There are no restrictions on this code.
*/
.macro alternative_if_not cap
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
/*
* Provide the alternative code sequence.
* The code for the case where the capability is not present will be
* assembled and linked as normal. There are no restrictions on this
* code.
*
* The code that follows this macro is assembled into a special
* section to be used for dynamic patching. Code that follows this
* macro must:
* The code for the case where the capability is present will be
* assembled into a special section to be used for dynamic patching.
* Code for that case must:
*
* 1. Be exactly the same length (in bytes) as the default code
* sequence.
@@ -116,8 +108,38 @@ void apply_alternatives(void *start, size_t length);
* alternative sequence it is defined in (branches into an
* alternative sequence are not fixed up).
*/
/*
* Begin an alternative code sequence.
*/
.macro alternative_if_not cap
.set .Lasm_alt_mode, 0
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
.macro alternative_if cap
.set .Lasm_alt_mode, 1
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
.pushsection .altinstr_replacement, "ax"
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
/*
* Provide the other half of the alternative code sequence.
*/
.macro alternative_else
662: .pushsection .altinstr_replacement, "ax"
662:
.if .Lasm_alt_mode==0
.pushsection .altinstr_replacement, "ax"
.else
.popsection
.endif
663:
.endm
@@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length);
* Complete an alternative code sequence.
*/
.macro alternative_endif
664: .popsection
664:
.if .Lasm_alt_mode==0
.popsection
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm
/*
* Provides a trivial alternative or default sequence consisting solely
* of NOPs. The number of NOPs is chosen automatically to match the
* previous case.
*/
.macro alternative_else_nop_endif
alternative_else
nops (662b-661b) / AARCH64_INSN_SIZE
alternative_endif
.endm
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
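These same-length and no-external-branch constraints also govern the C-side ALTERNATIVE() macro that sits alongside these assembler macros; the uaccess changes later in this commit use it to patch a single nop into a PSTATE.PAN write, along the lines of this minimal sketch (the wrapper function is illustrative, not part of the diff):

#include <asm/alternative.h>	/* ALTERNATIVE() */
#include <asm/sysreg.h>		/* SET_PSTATE_PAN() */

static inline void set_pan_sketch(void)
{
	/* "nop" and SET_PSTATE_PAN(1) are each one instruction, so the
	 * replacement fits exactly when ARM64_HAS_PAN is detected. */
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1),
			ARM64_HAS_PAN, CONFIG_ARM64_PAN));
}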

View file

@@ -107,6 +107,15 @@
dmb \opt
.endm
/*
* NOP sequence
*/
.macro nops, num
.rept \num
nop
.endr
.endm
/*
* Emit an entry into the exception table
*/
@@ -383,15 +392,11 @@ alternative_endif
*/
.macro post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
nop
nop
nop
alternative_else
alternative_if ARM64_WORKAROUND_CAVIUM_27456
ic iallu
dsb nsh
isb
alternative_endif
alternative_else_nop_endif
#endif
.endm

View file

@@ -20,6 +20,9 @@
#ifndef __ASSEMBLY__
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfi() asm volatile("wfi" : : : "memory")
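The string form __nops() is what allows NOP padding to be spliced into other inline-asm strings; a plausible use, borrowing the Cavium erratum sequence patched later in this commit, is padding the default side of a C-level ALTERNATIVE() to match a three-instruction replacement (a sketch under that assumption, not a call site from this diff):

#include <asm/alternative.h>	/* ALTERNATIVE() */
#include <asm/barrier.h>	/* __nops() */

static inline void cavium_27456_sketch(void)
{
	/* Three NOPs by default, patched to the I-cache invalidation sequence
	 * when the ARM64_WORKAROUND_CAVIUM_27456 capability is present. */
	asm volatile(ALTERNATIVE(__nops(3),
				 "ic	iallu\n"
				 "dsb	nsh\n"
				 "isb\n",
				 ARM64_WORKAROUND_CAVIUM_27456));
}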

View file

@@ -21,10 +21,7 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \

View file

@@ -218,9 +218,11 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
* of another thread of the same process).
* of another thread of the same process). Avoid setting the reserved
* TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
*/
update_saved_ttbr0(tsk, next);
if (next != &init_mm)
update_saved_ttbr0(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)

View file

@@ -21,8 +21,6 @@
#include <uapi/asm/ptrace.h>
#define _PSR_PAN_BIT 22
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)

View file

@@ -47,10 +47,10 @@ typedef unsigned long mm_segment_t;
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};

View file

@@ -18,6 +18,10 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
#ifndef __ASSEMBLY__
/*
@@ -26,11 +30,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/kernel-pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -130,7 +131,7 @@ static inline void set_fs(mm_segment_t fs)
* User access enabling/disabling.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void uaccess_ttbr0_disable(void)
static inline void __uaccess_ttbr0_disable(void)
{
unsigned long ttbr;
@@ -140,7 +141,7 @@ static inline void uaccess_ttbr0_disable(void)
isb();
}
static inline void uaccess_ttbr0_enable(void)
static inline void __uaccess_ttbr0_enable(void)
{
unsigned long flags;
@@ -154,30 +155,44 @@ static inline void uaccess_ttbr0_enable(void)
isb();
local_irq_restore(flags);
}
#else
static inline void uaccess_ttbr0_disable(void)
static inline bool uaccess_ttbr0_disable(void)
{
if (!system_uses_ttbr0_pan())
return false;
__uaccess_ttbr0_disable();
return true;
}
static inline void uaccess_ttbr0_enable(void)
static inline bool uaccess_ttbr0_enable(void)
{
if (!system_uses_ttbr0_pan())
return false;
__uaccess_ttbr0_enable();
return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
return false;
}
static inline bool uaccess_ttbr0_enable(void)
{
return false;
}
#endif
#define __uaccess_disable(alt) \
do { \
if (system_uses_ttbr0_pan()) \
uaccess_ttbr0_disable(); \
else \
if (!uaccess_ttbr0_disable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
#define __uaccess_enable(alt) \
do { \
if (system_uses_ttbr0_pan()) \
uaccess_ttbr0_enable(); \
else \
if (!uaccess_ttbr0_enable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
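The bool return is what lets these wrappers fall back to the PSTATE.PAN alternative only when TTBR0-based PAN is not in use; a hypothetical caller (the function name is illustrative, and the capability mirrors the not-UAO helpers elsewhere in this commit) would bracket a user access like so:

static inline void user_access_window_sketch(void)
{
	/* Open the user window: restore the task's TTBR0_EL1, or clear
	 * PSTATE.PAN via the patched nop when TTBR0 PAN is not in use. */
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);

	/* ... unprivileged (ldtr/sttr) accesses to user memory go here ... */

	/* Close the window: point TTBR0_EL1 back at the reserved page, or
	 * set PSTATE.PAN again. */
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}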
@@ -407,69 +422,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);
#else /* __ASSEMBLY__ */
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/kernel-pgtable.h>
/*
* User access enabling/disabling macros.
*/
.macro uaccess_ttbr0_disable, tmp1
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_enable, tmp1
.macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else_nop_endif
.endm
#else
.macro uaccess_ttbr0_disable, tmp1
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
uaccess_ttbr0_disable \tmp1
alternative_else
nop
nop
nop
nop
alternative_endif
#endif
alternative_if_not ARM64_ALT_PAN_NOT_UAO
nop
alternative_else
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_endif
alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else
nop
nop
nop
nop
nop
nop
nop
alternative_endif
#endif
alternative_if_not ARM64_ALT_PAN_NOT_UAO
nop
alternative_else
uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_endif
alternative_else_nop_endif
.endm
#endif /* __ASSEMBLY__ */

View file

@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>

View file

@@ -36,11 +36,11 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
#endif
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();

View file

@@ -121,11 +121,9 @@
* feature as all TTBR0_EL1 accesses are disabled, not just those to
* user mappings.
*/
alternative_if_not ARM64_HAS_PAN
nop
alternative_else
alternative_if ARM64_HAS_PAN
b 1f // skip TTBR0 PAN
alternative_endif
alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
@@ -135,7 +133,7 @@ alternative_endif
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
.endif
uaccess_ttbr0_disable x21
__uaccess_ttbr0_disable x21
1:
#endif
@@ -184,17 +182,15 @@ alternative_endif
* Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
* PAN bit checking.
*/
alternative_if_not ARM64_HAS_PAN
nop
alternative_else
alternative_if ARM64_HAS_PAN
b 2f // skip TTBR0 PAN
alternative_endif
alternative_else_nop_endif
.if \el != 0
tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
uaccess_ttbr0_enable x0
__uaccess_ttbr0_enable x0
.if \el == 0
/*

View file

@@ -17,9 +17,6 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
.text

View file

@@ -16,10 +16,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*

View file

@@ -18,10 +18,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*

View file

@@ -16,10 +16,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*

View file

@@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/uaccess.h>
/*
* flush_icache_range(start,end)
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
dsb ish
isb
mov x0, #0
1:
uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
ret
b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

View file

@@ -486,10 +486,10 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },

View file

@@ -90,7 +90,6 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Privcmd calls are issued by the userspace. The kernel needs to
* enable access to TTBR0_EL1 as the hypervisor would issue stage 1
@@ -99,15 +98,12 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled).
*/
uaccess_enable_not_uao x6, x7
#endif
uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Disable userspace access from kernel once the hyp call completed.
*/
uaccess_disable_not_uao x6
#endif
uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);

View file

@@ -395,6 +395,7 @@ endchoice
config SCHED_WALT
bool "Support window based load tracking"
depends on SMP
depends on FAIR_GROUP_SCHED
help
This feature will allow the scheduler to maintain a tunable window
based set of metrics for tasks and runqueues. These metrics can be

View file

@@ -524,7 +524,7 @@ int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(dentry, iattr);
if (!err) {
if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
sock->sk->sk_uid = iattr->ia_uid;