/* * Page fault handler for SH with an MMU. * * Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2003 - 2012 Paul Mundt * * Based on linux/arch/i386/mm/fault.c: * Copyright (C) 1995 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details.
*/ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/perf_event.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <asm/io_trapped.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/traps.h>
/* * This is useful to dump out the page tables associated with * 'addr' in mm 'mm'.
*/ staticvoid show_pte(struct mm_struct *mm, unsignedlong addr)
/*
 * NOTE(review): this region is corrupted by extraction.  show_pte()'s body is
 * missing after the pgd declaration, and the statements below (the pmd/pmd_k
 * synchronisation and the BUG_ON) appear to belong to a separate helper of the
 * vmalloc_sync_one() kind whose signature has been lost.  "staticvoid" and
 * "unsignedlong" have also lost the space between keywords.  Recover the
 * missing code from the upstream file before this can compile.
 */
{
pgd_t *pgd;
/* NOTE(review): 'pmd' and 'pmd_k' are not declared anywhere in the visible code. */
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k); else { /* * The page tables are fully synchronised so there must * be another reason for the fault. Return NULL here to * signal that we have not taken care of the fault.
*/
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); return NULL;
}
/* * Handle a fault on the vmalloc or module mapping area
*/ static noinline int vmalloc_fault(unsignedlong address)
{
pgd_t *pgd_k;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc/module/P3 area: */ if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT)) return -1;
/* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "current" here. We might be inside * an interrupt in the middle of a task switch..
*/
pgd_k = get_TTB();
pmd_k = vmalloc_sync_one(pgd_k, address); if (!pmd_k) return -1;
pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) return -1;
return 0;
}
/*
 * Print diagnostic information about a faulting kernel access before
 * an oops, unless oops printing is suppressed.
 *
 * NOTE(review): this definition is truncated by extraction — everything
 * after the oops_may_print() guard (the actual diagnostic printing and
 * the closing brace) is missing, and "staticvoid"/"unsignedlong" have
 * lost their separating spaces.  Restore the body from the upstream file.
 */
staticvoid
show_fault_oops(struct pt_regs *regs, unsignedlong address)
{ if (!oops_may_print()) return;
/*
 * Handle a fault taken in kernel context that cannot be satisfied:
 * first give the exception fixup tables and the trapped-I/O emulation
 * a chance to recover; if neither applies, oops and kill the task.
 */
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/* Emulated (trapped) I/O accesses may also fix up the fault. */
	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
}
/*
 * Report a bad-area fault without holding mmap_lock: user-mode accesses
 * get a SIGSEGV with the given si_code.
 *
 * NOTE(review): this definition is truncated by extraction — everything
 * after local_irq_enable() (the signal delivery for user mode and the
 * kernel-mode no_context() fallback, plus the closing braces) is missing,
 * and "staticvoid"/"unsignedlong" have lost their separating spaces.
 * Restore the body from the upstream file.
 */
staticvoid
__bad_area_nosemaphore(struct pt_regs *regs, unsignedlong error_code, unsignedlong address, int si_code)
{ /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { /* * It's possible to have interrupts off here:
*/
local_irq_enable();
/*
 * Deal with the error / retry-interruption conditions coming back from
 * handle_mm_fault().
 *
 * Returns 1 when the fault has been fully dealt with here (fatal signal
 * pending, OOM, SIGBUS or SIGSEGV), 0 when the caller should continue
 * on its normal completion path.  Drops mmap_lock where required.
 */
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_lock first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		mmap_read_unlock(current->mm);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}
/*
 * Check whether the access described by @error_code is permitted on @vma.
 *
 * Returns 1 on an access violation (write to a non-writable mapping,
 * instruction fetch from a non-executable mapping via an ITLB miss, or
 * read from an inaccessible mapping), 0 when the access is allowed.
 */
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	/*
	 * Accessible, readable mapping: the access is fine.  (This final
	 * return and the closing brace were lost in the damaged copy,
	 * leaving control to fall off the end of a non-void function.)
	 */
	return 0;
}
/* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines.
*/
/*
 * NOTE(review): this definition is badly truncated by extraction and will
 * not compile as-is:
 *   - the fault_in_kernel_space() branch is never closed (its trailing
 *     "return;" and "}" are missing);
 *   - the whole mmap_read_lock()/find_vma()/stack-expansion section is
 *     missing, so 'vma' is used below without ever being assigned;
 *   - the 'retry:' label targeted by "goto retry" is missing;
 *   - "unsignedlong"/"unsignedint" have lost their separating spaces.
 * Restore the missing sections from the upstream file.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsignedlong error_code, unsignedlong address)
{ unsignedlong vec; struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma;
vm_fault_t fault; unsignedint flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
vec = lookup_exception_vector();
/* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more.
*/ if (unlikely(fault_in_kernel_space(address))) { if (vmalloc_fault(address) >= 0) return; if (kprobe_page_fault(regs, vec)) return;
/* NOTE(review): the "return;" and closing "}" of the branch above are missing here. */
/* * If we're in an interrupt, have no user context or are running * with pagefaults disabled then we must not take the fault:
*/ if (unlikely(faulthandler_disabled() || !mm)) {
bad_area_nosemaphore(regs, error_code, address); return;
}
/* NOTE(review): the mmap_read_lock()/find_vma() lookup that should set 'vma' is missing here. */
/* * Ok, we have a good vm_area for this memory access, so * we can handle it..
*/ if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address); return;
}
set_thread_fault_code(error_code);
if (user_mode(regs))
flags |= FAULT_FLAG_USER; if (error_code & FAULT_CODE_WRITE)
flags |= FAULT_FLAG_WRITE;
/* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) if (mm_fault_error(regs, error_code, address, fault)) return;
/* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return;
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/* * No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c.
*/ goto retry;
/* NOTE(review): the 'retry:' label this jumps to is missing from the visible code. */
}
mmap_read_unlock(mm);
}
/*
 * NOTE(review): the text that followed here ("Messung V0.5", a processing-time
 * notice, and a German liability disclaimer about the completeness and
 * correctness of the information on a web page) is boilerplate emitted by the
 * website that rendered this file.  It is not part of the source and should be
 * deleted outright.
 */