/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	init_dsp();
	clear_thread_flag(TIF_USEDMSA);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	disable_msa();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

/*
 * Duplicating a task's context in fork() used to race with preemption:
 * if the parent was descheduled between dup_task_struct() and
 * copy_thread() - e.g. while blocked on a mutex when duplicating the
 * memory map - its live MSA/FPU/DSP register state (say, dirty
 * $f20-$f31) was saved to the *parent* context and the hardware
 * disabled, so copy_thread()'s "is the hardware in use?" check missed
 * it and the child inherited a stale copy from the last save. Syncing
 * the live hardware state into the parent context here, before the
 * duplication, guarantees the child an identical, up-to-date copy; the
 * hardware stays enabled, so the parent keeps running on its live
 * registers.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;

		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

#define J_TARGET(pc,target) \
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

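/*
 * Worked example (illustrative): the word 0x080726f0 encodes
 * 'j 0x801c9bc0' when located at pc 0x801ca118 - the 26-bit target
 * field is 0x0726f0, and J_TARGET() computes
 * (0x801ca118 & 0xf0000000) | (0x0726f0 << 2) == 0x801c9bc0, i.e. the
 * destination always lies in the same 256MB segment as the pc.
 */
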
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}

	return 0;
#endif
}

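/*
 * Illustrative example: the classic-MIPS store 'sw ra,20(sp)'
 * (opcode == sw_op, rs == 29, rt == 31, simmediate == 20) is accepted
 * by is_ra_save_ins() above with *poff = 20 / sizeof(ulong), i.e. the
 * saved ra sits 5 ulongs above the stack pointer on a 32-bit kernel.
 */
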
/*
 * is_jump_ins() also matches plain 'j' so that get_frame_info() stops
 * scanning at sibling calls: a tail-called function such as
 * schedule() ('lw v0,0(gp); lw v0,0(v0); j __schedule; nop') has no
 * prologue of its own, and without ending the search at 'j' the scan
 * would run on into the following function (e.g. io_schedule()) and
 * report that function's frame instead.
 */
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
			(ip->mm16_r3_format.simmediate & mm_addiusp_func)) ||
		       (ip->mm16_r5_format.opcode == mm_pool16d_op &&
			ip->mm16_r5_format.rt == 29);
	}

	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip;
	const unsigned int max_insns = 128;
	unsigned int last_insn_size = 0;
	unsigned int i;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	for (i = 0; i < max_insns; i++) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.halfword[0] = 0;
			insn.halfword[1] = ip->halfword[0];
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.halfword[0] = ip->halfword[1];
			insn.halfword[1] = ip->halfword[0];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jump_ins(&insn))
			break;

		if (!info->frame_size) {
			if (is_sp_move_ins(&insn)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
				} else
#endif
				info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

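/*
 * Illustrative example: for a prologue such as
 *
 *	addiu	sp,sp,-24
 *	sw	ra,20(sp)
 *
 * get_frame_info() records frame_size == 24 and
 * pc_offset == 20 / sizeof(ulong), so an unwinder can read the saved ra
 * at sp + 20 and then step sp up by 24 to reach the caller's frame.
 */
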
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START,
	 * task stacks at THREAD_SIZE - 32.
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points
		 * to something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function a leaf one. In those cases,
		 * avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
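
/*
 * Minimal sketch of a caller's unwind loop (this is the pattern used by
 * show_backtrace() in traps.c):
 *
 *	do {
 *		print_ip_sym(pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */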

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit
 * ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
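
/*
 * Illustrative example: with 4KB pages, ~PAGE_MASK == 0xfff, so at most
 * PAGE_SIZE - 1 bytes are subtracted; sp == 0x7fff8000 with
 * get_random_int() == 0x1235 gives 0x7fff8000 - 0x235 == 0x7fff7dcb,
 * which ALMASK then rounds down to 0x7fff7dc8 (8-byte alignment on a
 * 32-bit kernel).
 */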

/*
 * Backtraces of remote CPUs are requested with async IPIs
 * (smp_call_function_single_async()) because this path may run with
 * interrupts disabled - e.g. from the RCU CPU stall watchdog - where a
 * synchronous IPI could deadlock the requesting CPU. Reuse of each
 * CPU's pre-allocated call_single_data is serialized by the
 * backtrace_csd_busy cpumask: a bit still set means the target never
 * handled the previous backtrace IPI (it is probably hung), so a
 * warning is printed instead of resending.
 */
static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

	arch_spin_lock(&lock);
	regs = get_irq_regs();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	arch_spin_unlock(&lock);

	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

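/*
 * Note (illustrative): this is reached via trigger_all_cpu_backtrace(),
 * e.g. from the magic SysRq 'l' handler or the RCU CPU stall watchdog.
 */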
void arch_trigger_all_cpu_backtrace(bool include_self)
{
MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
commit b63e132b6433a41cf311e8bc382d33fd2b73b505 upstream.
The current MIPS implementation of arch_trigger_cpumask_backtrace() is
broken because it attempts to use synchronous IPIs despite the fact that
it may be run with interrupts disabled.
This means that when arch_trigger_cpumask_backtrace() is invoked, for
example by the RCU CPU stall watchdog, we may:
- Deadlock due to use of synchronous IPIs with interrupts disabled,
causing the CPU that's attempting to generate the backtrace output
to hang itself.
- Not succeed in generating the desired output from remote CPUs.
- Produce warnings about this from smp_call_function_many(), for
example:
[42760.526910] INFO: rcu_sched detected stalls on CPUs/tasks:
[42760.535755] 0-...!: (1 GPs behind) idle=ade/140000000000000/0 softirq=526944/526945 fqs=0
[42760.547874] 1-...!: (0 ticks this GP) idle=e4a/140000000000000/0 softirq=547885/547885 fqs=0
[42760.559869] (detected by 2, t=2162 jiffies, g=266689, c=266688, q=33)
[42760.568927] ------------[ cut here ]------------
[42760.576146] WARNING: CPU: 2 PID: 1216 at kernel/smp.c:416 smp_call_function_many+0x88/0x20c
[42760.587839] Modules linked in:
[42760.593152] CPU: 2 PID: 1216 Comm: sh Not tainted 4.15.4-00373-gee058bb4d0c2 #2
[42760.603767] Stack : 8e09bd20 8e09bd20 8e09bd20 fffffff0 00000007 00000006 00000000 8e09bca8
[42760.616937] 95b2b379 95b2b379 807a0080 00000007 81944518 0000018a 00000032 00000000
[42760.630095] 00000000 00000030 80000000 00000000 806eca74 00000009 8017e2b8 000001a0
[42760.643169] 00000000 00000002 00000000 8e09baa4 00000008 808b8008 86d69080 8e09bca0
[42760.656282] 8e09ad50 805e20aa 00000000 00000000 00000000 8017e2b8 00000009 801070ca
[42760.669424] ...
[42760.673919] Call Trace:
[42760.678672] [<27fde568>] show_stack+0x70/0xf0
[42760.685417] [<84751641>] dump_stack+0xaa/0xd0
[42760.692188] [<699d671c>] __warn+0x80/0x92
[42760.698549] [<68915d41>] warn_slowpath_null+0x28/0x36
[42760.705912] [<f7c76c1c>] smp_call_function_many+0x88/0x20c
[42760.713696] [<6bbdfc2a>] arch_trigger_cpumask_backtrace+0x30/0x4a
[42760.722216] [<f845bd33>] rcu_dump_cpu_stacks+0x6a/0x98
[42760.729580] [<796e7629>] rcu_check_callbacks+0x672/0x6ac
[42760.737476] [<059b3b43>] update_process_times+0x18/0x34
[42760.744981] [<6eb94941>] tick_sched_handle.isra.5+0x26/0x38
[42760.752793] [<478d3d70>] tick_sched_timer+0x1c/0x50
[42760.759882] [<e56ea39f>] __hrtimer_run_queues+0xc6/0x226
[42760.767418] [<e88bbcae>] hrtimer_interrupt+0x88/0x19a
[42760.775031] [<6765a19e>] gic_compare_interrupt+0x2e/0x3a
[42760.782761] [<0558bf5f>] handle_percpu_devid_irq+0x78/0x168
[42760.790795] [<90c11ba2>] generic_handle_irq+0x1e/0x2c
[42760.798117] [<1b6d462c>] gic_handle_local_int+0x38/0x86
[42760.805545] [<b2ada1c7>] gic_irq_dispatch+0xa/0x14
[42760.812534] [<90c11ba2>] generic_handle_irq+0x1e/0x2c
[42760.820086] [<c7521934>] do_IRQ+0x16/0x20
[42760.826274] [<9aef3ce6>] plat_irq_dispatch+0x62/0x94
[42760.833458] [<6a94b53c>] except_vec_vi_end+0x70/0x78
[42760.840655] [<22284043>] smp_call_function_many+0x1ba/0x20c
[42760.848501] [<54022b58>] smp_call_function+0x1e/0x2c
[42760.855693] [<ab9fc705>] flush_tlb_mm+0x2a/0x98
[42760.862730] [<0844cdd0>] tlb_flush_mmu+0x1c/0x44
[42760.869628] [<cb259b74>] arch_tlb_finish_mmu+0x26/0x3e
[42760.877021] [<1aeaaf74>] tlb_finish_mmu+0x18/0x66
[42760.883907] [<b3fce717>] exit_mmap+0x76/0xea
[42760.890428] [<c4c8a2f6>] mmput+0x80/0x11a
[42760.896632] [<a41a08f4>] do_exit+0x1f4/0x80c
[42760.903158] [<ee01cef6>] do_group_exit+0x20/0x7e
[42760.909990] [<13fa8d54>] __wake_up_parent+0x0/0x1e
[42760.917045] [<46cf89d0>] smp_call_function_many+0x1a2/0x20c
[42760.924893] [<8c21a93b>] syscall_common+0x14/0x1c
[42760.931765] ---[ end trace 02aa09da9dc52a60 ]---
[42760.938342] ------------[ cut here ]------------
[42760.945311] WARNING: CPU: 2 PID: 1216 at kernel/smp.c:291 smp_call_function_single+0xee/0xf8
...
This patch switches MIPS' arch_trigger_cpumask_backtrace() to use async
IPIs & smp_call_function_single_async() in order to resolve this
problem. We ensure use of the pre-allocated call_single_data_t
structures is serialized by maintaining a cpumask indicating that
they're busy, and refusing to attempt to send an IPI when a CPU's bit is
set in this mask. This should only happen if a CPU hasn't responded to a
previous backtrace IPI - i.e. if it's hung - and we print a warning to
the console in this case. A sketch of the supporting definitions this
scheme assumes follows the code below.
I've marked this for stable branches as far back as v4.9, to which it
applies cleanly. Strictly speaking the faulty MIPS implementation can be
traced further back to commit 856839b76836 ("MIPS: Add
arch_trigger_all_cpu_backtrace() function") in v3.19, but kernel
versions v3.19 through v4.8 will require further work to backport due to
the rework performed in commit 9a01c3ed5cdb ("nmi_backtrace: add more
trigger_*_cpu_backtrace() methods").
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/19597/
Cc: James Hogan <jhogan@kernel.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org # v4.9+
Fixes: 856839b76836 ("MIPS: Add arch_trigger_all_cpu_backtrace() function")
Fixes: 9a01c3ed5cdb ("nmi_backtrace: add more trigger_*_cpu_backtrace() methods")
[ Huacai: backported to 4.4: restructured, since the generic NMI backtrace solution is unavailable ]
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-06-22 10:55:46 -07:00
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, cpu_online_mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = arch_dump_stack;
		smp_call_function_single_async(cpu, csd);
	}
}
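
The loop above relies on a few definitions living elsewhere in this file: the
per-CPU call_single_data, the busy cpumask, and the IPI handler that clears
the busy bit once it has dumped the target CPU's state. A minimal sketch of
those pieces, assuming the handler shape described in the commit message (the
bodies here are illustrative, not quoted from the backport):

static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
static struct cpumask backtrace_csd_busy;

/* Runs on the target CPU in IPI context and dumps its state. */
static void arch_dump_stack(void *info)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs)
		show_regs(regs);
	else
		dump_stack();

	/*
	 * Clearing our bit marks the call_single_data reusable; until
	 * then the sender refuses to send this CPU another IPI.
	 */
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

In v4.4 this path is reached from trigger_all_cpu_backtrace() callers such as
the RCU CPU stall watchdog seen in the log above.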

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
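
mips_get_process_fp_mode() backs the PR_GET_FP_MODE prctl(2) option. A minimal
userspace sketch of querying the current mode - assuming a toolchain whose
<sys/prctl.h> pulls in the PR_FP_MODE_* constants (otherwise include
<linux/prctl.h> as well):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Returns a bitmask of PR_FP_MODE_FR / PR_FP_MODE_FRE, or -1. */
	int mode = prctl(PR_GET_FP_MODE);

	if (mode < 0) {
		perror("PR_GET_FP_MODE");
		return 1;
	}

	printf("FR=%d FRE=%d\n",
	       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
	return 0;
}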

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;
MIPS: prctl: Disallow FRE without FR with PR_SET_FP_MODE requests
commit 28e4213dd331e944e7fca1954a946829162ed9d4 upstream.
Having PR_FP_MODE_FRE (i.e. Config5.FRE) set without PR_FP_MODE_FR (i.e.
Status.FR) is not supported as the lone purpose of Config5.FRE is to
emulate Status.FR=0 handling on FPU hardware that has Status.FR=1
hardwired[1][2]. Also we do not handle this case elsewhere, and we
assume throughout our code that TIF_HYBRID_FPREGS and TIF_32BIT_FPREGS
cannot both be set at once for a task, leading to inconsistent behaviour
if this does happen.
Therefore return unsuccessfully from prctl(2) PR_SET_FP_MODE calls that
request PR_FP_MODE_FRE with PR_FP_MODE_FR clear. This corresponds to the
modes allowed by `mips_set_personality_fp'.
References:
[1] "MIPS Architecture For Programmers, Vol. III: MIPS32 / microMIPS32
Privileged Resource Architecture", Imagination Technologies,
Document Number: MD00090, Revision 6.02, July 10, 2015, Table 9.69
"Config5 Register Field Descriptions", p. 262
[2] "MIPS Architecture For Programmers, Volume III: MIPS64 / microMIPS64
Privileged Resource Architecture", Imagination Technologies,
Document Number: MD00091, Revision 6.03, December 22, 2015, Table
9.72 "Config5 Register Field Descriptions", p. 288
Fixes: 9791554b45a2 ("MIPS,prctl: add PR_[GS]ET_FP_MODE prctl options for MIPS")
Signed-off-by: Maciej W. Rozycki <macro@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: <stable@vger.kernel.org> # 4.0+
Patchwork: https://patchwork.linux-mips.org/patch/19327/
Signed-off-by: James Hogan <jhogan@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-05-15 23:04:44 +01:00
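
As a hedged illustration of the user-visible effect (a sketch, not part of the
patch): after this change a request for FRE alone fails with EOPNOTSUPP, while
FR together with FRE remains a valid hybrid mode where the kernel and CPU
support it.

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* FRE without FR: rejected by this change. */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FRE) < 0 && errno == EOPNOTSUPP)
		printf("FRE-only mode refused, as expected\n");

	/* FR|FRE is the hybrid mode; acceptance still depends on the CPU. */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE) == 0)
		printf("FR|FRE mode accepted\n");

	return 0;
}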

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Proceed with the mode switch */
	preempt_disable();

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a task's time slice. Pretty much
	 * all of the mode switch overhead can thus be confined to cases where
	 * mode switches are actually occurring. That is, to here. However for
	 * the thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);
	preempt_enable();

	return 0;
}
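
The fp_mode_switching flag set above has a consumer on the FP-use path: when a
thread takes an FP-unusable exception while a mode switch for its mm is in
flight, it must wait for the switcher to finish rewriting every thread's flags
before acquiring live FP context. A hedged sketch of that helper, modelled on
the one the kernel's trap handling uses (treat the exact name and placement as
assumptions):

/*
 * Sketch: called from the FPU-enable exception path before restoring
 * FP context. Sleep until any in-flight mode switch has completed.
 */
static void wait_on_fp_mode_switch(atomic_t *p)
{
	while (atomic_read(p))
		schedule();
}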