if (!mm)
mm = &init_mm;
- printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pr_alert("pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
- printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+ pr_alert("[%08lx] *pgd=%08llx",
addr, (long long)pgd_val(*pgd));
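	/*
	 * Walk the page tables for addr one level at a time, continuing
	 * the same log line; bail out at the first missing or bad entry.
	 */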
	do {
		if (pgd_none(*pgd))
			break;
if (pgd_bad(*pgd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pud = pud_offset(pgd, addr);
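		/* Only dump the pud when this level isn't folded into the pgd */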
if (PTRS_PER_PUD != 1)
- printk(", *pud=%08llx", (long long)pud_val(*pud));
+ pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
- printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+ pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%08llx", (long long)pte_val(*pte));
+ pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
- printk(", *ppte=%08llx",
+ pr_cont(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
bust_spinlocks(1);
- printk(KERN_ALERT
- "Unable to handle kernel %s at virtual address %08lx\n",
- (addr < PAGE_SIZE) ? "NULL pointer dereference" :
- "paging request", addr);
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- int write = fsr & FSR_WRITE;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
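	/* Give in-kernel debug hooks (e.g. kprobes) first shot at the fault */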
if (notify_page_fault(regs, fsr))
return 0;
if (in_atomic() || !mm)
goto no_context;
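+ 	/*
+ 	 * Record the fault context for the generic handler: whether we
+ 	 * faulted from user mode, and whether the access was a write.
+ 	 */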
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (fsr & FSR_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+
/*
* As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
if (fault & VM_FAULT_OOM) {
/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
return 0;
}
- /*
- * If we are in kernel mode at this point, we
- * have no context to handle this fault with.
- */
- if (!user_mode(regs))
- goto no_context;
-
if (fault & VM_FAULT_SIGBUS) {
/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
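	/* No registered handler claimed the fault: log it and raise a signal */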
- printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
- printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;