sh64: Port OOM changes to do_page_fault
Reflect the sh32 OOM changes for the sh64 page fault handler, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 11fd982400
commit a1e2030122
1 changed file with 30 additions and 10 deletions
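In short, the sh64 handler picks up the same killable, retry-once fault loop that the sh32 handler already uses: build the fault flags up front, let the mm core release mmap_sem when handle_mm_fault() returns VM_FAULT_RETRY, and loop back exactly once with FAULT_FLAG_ALLOW_RETRY cleared. Below is a condensed sketch of that pattern as it appears in the hunks that follow; the vma lookup, the good_area/out_of_memory paths, the perf_sw_event() accounting, and the variables tsk, mm, vma, address and writeaccess all belong to the surrounding do_page_fault() and are assumed or elided here, so treat this as an illustration rather than a drop-in replacement:

        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                              (writeaccess ? FAULT_FLAG_WRITE : 0));
        int fault;

retry:
        down_read(&mm->mmap_sem);
        /* ... find_vma()/good_area checks elided ... */

        fault = handle_mm_fault(mm, vma, address, flags);

        /* A fatal signal may have interrupted a killable retry; just bail. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                /* ... remaining error handling elided ... */
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                /* Account the fault only on the first, retryable pass. */
                if (fault & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;

                if (fault & VM_FAULT_RETRY) {
                        /* mmap_sem was already dropped by the mm core. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
        }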
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001 Paolo Alberelli
  * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2009 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -95,6 +95,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
         struct mm_struct *mm;
         struct vm_area_struct * vma;
         const struct exception_table_entry *fixup;
+        unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                              (writeaccess ? FAULT_FLAG_WRITE : 0));
         pte_t *pte;
         int fault;
 
@@ -124,6 +126,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
         if (in_atomic() || !mm)
                 goto no_context;
 
+retry:
         /* TLB misses upon some cache flushes get done under cli() */
         down_read(&mm->mmap_sem);
 
@@ -188,7 +191,11 @@ good_area:
          * make sure we exit gracefully rather than endlessly redo
          * the fault.
          */
-        fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+        fault = handle_mm_fault(mm, vma, address, flags);
+
+        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+                return;
+
         if (unlikely(fault & VM_FAULT_ERROR)) {
                 if (fault & VM_FAULT_OOM)
                         goto out_of_memory;
@@ -197,14 +204,27 @@ good_area:
                 BUG();
         }
 
-        if (fault & VM_FAULT_MAJOR) {
-                tsk->maj_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-                              regs, address);
-        } else {
-                tsk->min_flt++;
-                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-                              regs, address);
+        if (flags & FAULT_FLAG_ALLOW_RETRY) {
+                if (fault & VM_FAULT_MAJOR) {
+                        tsk->maj_flt++;
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                      regs, address);
+                } else {
+                        tsk->min_flt++;
+                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                      regs, address);
+                }
+
+                if (fault & VM_FAULT_RETRY) {
+                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                        /*
+                         * No need to up_read(&mm->mmap_sem) as we would
+                         * have already released it in __lock_page_or_retry
+                         * in mm/filemap.c.
+                         */
+                        goto retry;
+                }
         }
 
         /* If we get here, the page fault has been handled. Do the TLB refill