android_kernel_oneplus_msm8998/arch/arm64/kernel/process.c
Blagovest Kolenichev 901bf6ddcc Merge android-4.4@4b8fc9f (v4.4.82) into msm-4.4
* refs/heads/tmp-4b8fc9f
  UPSTREAM: locking: avoid passing around 'thread_info' in mutex debugging code
  ANDROID: arm64: fix undeclared 'init_thread_info' error
  UPSTREAM: kdb: use task_cpu() instead of task_thread_info()->cpu
  Linux 4.4.82
  net: account for current skb length when deciding about UFO
  ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
  mm/mempool: avoid KASAN marking mempool poison checks as use-after-free
  KVM: arm/arm64: Handle hva aging while destroying the vm
  sparc64: Prevent perf from running during super critical sections
  udp: consistently apply ufo or fragmentation
  revert "ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output"
  revert "net: account for current skb length when deciding about UFO"
  packet: fix tp_reserve race in packet_set_ring
  net: avoid skb_warn_bad_offload false positives on UFO
  tcp: fastopen: tcp_connect() must refresh the route
  net: sched: set xt_tgchk_param par.nft_compat as 0 in ipt_init_target
  bpf, s390: fix jit branch offset related to ldimm64
  net: fix keepalive code vs TCP_FASTOPEN_CONNECT
  tcp: avoid setting cwnd to invalid ssthresh after cwnd reduction states
  ANDROID: keychord: Fix for a memory leak in keychord.
  ANDROID: keychord: Fix races in keychord_write.
  Use %zu to print resid (size_t).
  ANDROID: keychord: Fix a slab out-of-bounds read.
  Linux 4.4.81
  workqueue: implicit ordered attribute should be overridable
  net: account for current skb length when deciding about UFO
  ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
  mm: don't dereference struct page fields of invalid pages
  signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
  lib/Kconfig.debug: fix frv build failure
  mm, slab: make sure that KMALLOC_MAX_SIZE will fit into MAX_ORDER
  ARM: 8632/1: ftrace: fix syscall name matching
  virtio_blk: fix panic in initialization error path
  drm/virtio: fix framebuffer sparse warning
  scsi: qla2xxx: Get mutex lock before checking optrom_state
  phy state machine: failsafe leave invalid RUNNING state
  x86/boot: Add missing declaration of string functions
  tg3: Fix race condition in tg3_get_stats64().
  net: phy: dp83867: fix irq generation
  sh_eth: R8A7740 supports packet shecksumming
  wext: handle NULL extra data in iwe_stream_add_point better
  sparc64: Measure receiver forward progress to avoid send mondo timeout
  xen-netback: correctly schedule rate-limited queues
  net: phy: Fix PHY unbind crash
  net: phy: Correctly process PHY_HALTED in phy_stop_machine()
  net/mlx5: Fix command bad flow on command entry allocation failure
  sctp: fix the check for _sctp_walk_params and _sctp_walk_errors
  sctp: don't dereference ptr before leaving _sctp_walk_{params, errors}()
  dccp: fix a memleak for dccp_feat_init err process
  dccp: fix a memleak that dccp_ipv4 doesn't put reqsk properly
  dccp: fix a memleak that dccp_ipv6 doesn't put reqsk properly
  net: ethernet: nb8800: Handle all 4 RGMII modes identically
  ipv6: Don't increase IPSTATS_MIB_FRAGFAILS twice in ip6_fragment()
  packet: fix use-after-free in prb_retire_rx_blk_timer_expired()
  openvswitch: fix potential out of bound access in parse_ct
  mcs7780: Fix initialization when CONFIG_VMAP_STACK is enabled
  rtnetlink: allocate more memory for dev_set_mac_address()
  ipv4: initialize fib_trie prior to register_netdev_notifier call.
  ipv6: avoid overflow of offset in ip6_find_1stfragopt
  net: Zero terminate ifr_name in dev_ifname().
  ipv4: ipv6: initialize treq->txhash in cookie_v[46]_check()
  saa7164: fix double fetch PCIe access condition
  drm: rcar-du: fix backport bug
  f2fs: sanity check checkpoint segno and blkoff
  media: lirc: LIRC_GET_REC_RESOLUTION should return microseconds
  mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
  iser-target: Avoid isert_conn->cm_id dereference in isert_login_recv_done
  iscsi-target: Fix delayed logout processing greater than SECONDS_FOR_LOGOUT_COMP
  iscsi-target: Fix initial login PDU asynchronous socket close OOPs
  iscsi-target: Fix early sk_data_ready LOGIN_FLAGS_READY race
  iscsi-target: Always wait for kthread_should_stop() before kthread exit
  target: Avoid mappedlun symlink creation during lun shutdown
  media: platform: davinci: return -EINVAL for VPFE_CMD_S_CCDC_RAW_PARAMS ioctl
  ARM: dts: armada-38x: Fix irq type for pca955
  ext4: fix overflow caused by missing cast in ext4_resize_fs()
  ext4: fix SEEK_HOLE/SEEK_DATA for blocksize < pagesize
  mm/page_alloc: Remove kernel address exposure in free_reserved_area()
  KVM: async_pf: make rcu irq exit if not triggered from idle task
  ASoC: do not close shared backend dailink
  ALSA: hda - Fix speaker output from VAIO VPCL14M1R
  workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
  libata: array underflow in ata_find_dev()
  ANDROID: binder: don't queue async transactions to thread.
  ANDROID: binder: don't enqueue death notifications to thread todo.
  ANDROID: binder: call poll_wait() unconditionally.
  android: configs: move quota-related configs to recommended
  BACKPORT: arm64: split thread_info from task stack
  UPSTREAM: arm64: assembler: introduce ldr_this_cpu
  UPSTREAM: arm64: make cpu number a percpu variable
  UPSTREAM: arm64: smp: prepare for smp_processor_id() rework
  BACKPORT: arm64: move sp_el0 and tpidr_el1 into cpu_suspend_ctx
  UPSTREAM: arm64: prep stack walkers for THREAD_INFO_IN_TASK
  UPSTREAM: arm64: unexport walk_stackframe
  UPSTREAM: arm64: traps: simplify die() and __die()
  UPSTREAM: arm64: factor out current_stack_pointer
  BACKPORT: arm64: asm-offsets: remove unused definitions
  UPSTREAM: arm64: thread_info remove stale items
  UPSTREAM: thread_info: include <current.h> for THREAD_INFO_IN_TASK
  UPSTREAM: thread_info: factor out restart_block
  UPSTREAM: kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
  UPSTREAM: sched/core: Add try_get_task_stack() and put_task_stack()
  UPSTREAM: sched/core: Allow putting thread_info into task_struct
  UPSTREAM: printk: when dumping regs, show the stack, not thread_info
  UPSTREAM: fix up initial thread stack pointer vs thread_info confusion
  UPSTREAM: Clarify naming of thread info/stack allocators
  ANDROID: sdcardfs: override credential for ioctl to lower fs

Conflicts:
	android/configs/android-base.cfg
	arch/arm64/Kconfig
	arch/arm64/include/asm/suspend.h
	arch/arm64/kernel/head.S
	arch/arm64/kernel/smp.c
	arch/arm64/kernel/suspend.c
	arch/arm64/kernel/traps.c
	arch/arm64/mm/proc.S
	kernel/fork.c
	sound/soc/soc-pcm.c

Change-Id: I273e216c94899a838bbd208391c6cbe20b2bf683
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
2017-09-01 11:47:49 -07:00

/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
#include <linux/percpu.h>
#endif
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
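
/*
 * Idle entry/exit hooks: broadcast idle-start and idle-end events on the
 * (Android-specific) idle notifier chain so that interested drivers can
 * react to this CPU entering or leaving the idle loop.
 */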
void arch_cpu_idle_enter(void)
{
	idle_notifier_call_chain(IDLE_START);
}

void arch_cpu_idle_exit(void)
{
	idle_notifier_call_chain(IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
	int i, j;
	int nlines;
	u32 *p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < KIMAGE_VADDR || addr > -256UL)
		return;

	printk("\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;

	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		printk("%04lx ", (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32 data;

			/* pr_cont() keeps all eight words on one log line */
			if (probe_kernel_address(p, data)) {
				pr_cont(" ********");
			} else {
				pr_cont(" %08x", data);
			}
			++p;
		}
		pr_cont("\n");
	}
}
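
/*
 * Dump the memory around the PC, LR and SP values in 'regs'. The address
 * limit is temporarily raised to KERNEL_DS so that show_data() is free to
 * probe kernel addresses via probe_kernel_address().
 */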
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
	mm_segment_t fs;

	fs = get_fs();
	set_fs(KERNEL_DS);
	show_data(regs->pc - nbytes, nbytes * 2, "PC");
	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
	show_data(regs->sp - nbytes, nbytes * 2, "SP");
	set_fs(fs);
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", lr);
	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
	       regs->pc, lr, regs->pstate);
	printk("sp : %016llx\n", sp);
	for (i = top_reg; i >= 0; i--) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		if (i % 2 == 0)
			printk("\n");
	}
	if (!user_mode(regs))
		show_extra_register_data(regs, 64);
	printk("\n");
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	__show_regs(regs);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

static void tls_thread_flush(void)
{
	asm ("msr tpidr_el0, xzr");

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		asm ("msr tpidrro_el0, xzr");
	}
}
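
/*
 * Reset the FPSIMD state, TLS registers and any hardware breakpoints when
 * a task starts running a fresh image, e.g. on exec().
 */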
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;
	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");
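
/*
 * Set up the initial context for a new thread. User threads get a copy of
 * the parent's pt_regs with x0 zeroed, so that fork()/clone() return 0 in
 * the child; for kernel threads, x19 and x20 carry the thread function and
 * its argument, which ret_from_fork invokes.
 */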
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			/* 16-byte aligned stack mandatory on AArch64 */
			else if (stack_start & 15)
				return -EINVAL;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
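
/*
 * Save the current task's user TLS (tpidr_el0) and install the next task's.
 * For compat (AArch32) tasks the TLS value is exposed read-only through
 * tpidrro_el0 instead.
 */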
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
	*task_user_tls(current) = tpidr;

	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;

	asm(
	"	msr	tpidr_el0, %0\n"
	"	msr	tpidrro_el0, %1"
	: : "r" (tpidr), "r" (tpidrro));
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}
#endif

/*
 * Thread switching.
 */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
#ifdef CONFIG_THREAD_INFO_IN_TASK
	entry_task_switch(next);
#endif
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}
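
/*
 * Report the "wait channel": the first PC outside the scheduler found by
 * unwinding the blocked task's stack. The walk is bounded (16 frames here)
 * so that a corrupted stack cannot trap us in the loop.
 */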
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}
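
/*
 * Randomise the stack pointer within a page (when VA randomisation is
 * enabled) while keeping it 16-byte aligned, as AArch64 requires.
 */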
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static unsigned long randomize_base(unsigned long base)
{
	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;

	return randomize_range(base, range_end, 0) ? : base;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_base(mm->brk);
}