// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/kfence.h>

#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

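/*
 * Only allow copy_from_kernel_nofault() on kernel addresses: the range
 * must start at or above TASK_SIZE and must not wrap around the top of
 * the address space.
 */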
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	unsigned long addr = (unsigned long)unsafe_src;

	return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pgd = pgd_offset(mm, addr);
	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		if (!pte)
			break;

		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	pr_cont("\n");
}
#else					/* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

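/*
 * Cache maintenance operations report as writes in the FSR (FSR_CM set)
 * but must not be treated as write faults, so require FSR_WRITE with
 * FSR_CM clear.
 */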
static inline bool is_write_fault(unsigned int fsr)
{
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

static void die_kernel_fault(const char *msg, struct mm_struct *mm,
			     unsigned long addr, unsigned int fsr,
			     struct pt_regs *regs)
{
	bust_spinlocks(1);
	pr_alert("8<--- cut here ---\n");
	pr_alert("Unable to handle kernel %s at virtual address %08lx when %s\n",
		 msg, addr, fsr & FSR_LNX_PF ? "execute" : str_write_read(fsr & FSR_WRITE));

	show_pte(KERN_ALERT, mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	const char *msg;
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else if (is_permission_fault(fsr) && fsr & FSR_LNX_PF) {
		msg = "execution of memory";
	} else {
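		/*
		 * Give KFENCE a chance to claim a translation fault in its
		 * pool before reporting a bad paging request.
		 */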
		if (is_translation_fault(fsr) &&
		    kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, mm, addr, fsr, regs);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV. Ensure interrupts are enabled
 * for preempt RT.
 */
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
		int code, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	local_irq_enable();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		pr_err("8<--- cut here ---\n");
		pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(KERN_ERR, tsk->mm, addr);
		show_regs(regs);
	}
#endif
#ifndef CONFIG_KUSER_HELPERS
	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
		printk_ratelimited(KERN_DEBUG
				   "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				   tsk->comm, addr);
#endif

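	/* Save the fault state in the thread struct; trap_no 14 denotes a page fault. */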
	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_TTBR0_PAN
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
	struct svc_pt_regs *svcregs;

	/* If we are in user mode: permission granted */
	if (user_mode(regs))
		return true;

	/* uaccess state saved above pt_regs on SVC exception entry */
	svcregs = to_svc_pt_regs(regs);

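	/* TTBCR.EPD0 set means TTBR0 walks were disabled, i.e. uaccess was off. */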
	return !(svcregs->ttbcr & TTBCR_EPD0);
}
#else
static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
{
	return true;
}
#endif

/*
 * Handle a vmalloc fault, copying the non-leaf page table entries from
 * init_mm.pgd. Any kernel context can trigger this, so we must not sleep
 * or enable interrupts. Having two CPUs execute this for the same page is
 * no problem, we'll just copy the same data twice.
 *
 * Returns false on failure.
 */
static bool __kprobes __maybe_unused vmalloc_fault(unsigned long addr)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);

	if (p4d_none(*p4d_k))
		return false;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);

	if (pud_none(*pud_k))
		return false;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to do the
	 * pmd_none() check on the entry that really corresponds to the
	 * address, not on the first of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		return false;

	copy_pmd(pmd, pmd_k);

	return true;
}

static int __kprobes
do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr,
			     unsigned int fsr, struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/*
		 * Fault from user mode for a kernel space address. User mode
		 * should not be faulting in kernel space, which includes the
		 * vector/khelper page. Handle the branch predictor hardening
		 * while interrupts are still disabled, then send a SIGSEGV.
		 * Note that __do_user_fault() will enable interrupts.
		 */
		harden_branch_predictor();
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	} else {
		/*
		 * Fault from kernel mode. Enable interrupts if they were
		 * enabled in the parent context. Section (upper page table)
		 * translation faults are handled via do_translation_fault(),
		 * so we will only get here for a non-present kernel space
		 * PTE or PTE permission fault. This may happen in exceptional
		 * circumstances and need the fixup tables to be walked.
		 */
		if (interrupts_enabled(regs))
			local_irq_enable();

		__do_kernel_fault(mm, addr, fsr, regs);
	}

	return 0;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_flags_t vm_flags = VM_ACCESS_FLAGS;

	if (kprobe_page_fault(regs, fsr))
		return 0;

	/*
	 * Handle kernel address faults separately, which avoids touching
	 * the mmap lock from contexts that are not able to sleep.
	 */
	if (addr >= TASK_SIZE)
		return do_kernel_address_page_fault(mm, addr, fsr, regs);

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (is_write_fault(fsr)) {
		flags |= FAULT_FLAG_WRITE;
		vm_flags = VM_WRITE;
	}

	if (fsr & FSR_LNX_PF) {
		vm_flags = VM_EXEC;

		if (is_permission_fault(fsr) && !user_mode(regs))
			die_kernel_fault("execution of memory",
					 mm, addr, fsr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
	 * routed via the translation fault mechanism. Check whether uaccess
	 * is disabled while in kernel mode.
	 */
	if (!ttbr0_usermode_access_allowed(regs))
		goto no_context;

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

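	/*
	 * Try the lockless per-VMA path first; fall back to taking the
	 * mmap lock if the VMA cannot be locked under RCU.
	 */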
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		fault = 0;
		code = SEGV_ACCERR;
		goto bad_area;
	}
	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = 0;
		code = SEGV_MAPERR;
		goto bad_area;
	}

	/*
	 * Ok, we have a good vm_area for this memory access; check that the
	 * permissions on the VMA allow for the fault which occurred.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		mmap_read_unlock(mm);
		fault = 0;
		code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return 0;

	if (!(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(mm);
done:

	/* Handle the "normal" case first */
	if (likely(!(fault & VM_FAULT_ERROR)))
		return 0;

	code = SEGV_MAPERR;
bad_area:
	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
	}

	__do_user_fault(addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If this is a user address (addr < TASK_SIZE), we handle this as a
 * normal page fault. This leaves the remainder of the function to handle
 * kernel address translation faults.
 *
 * Since user mode is not permitted to access kernel addresses, pass these
 * directly to do_kernel_address_page_fault() to handle.
 *
 * Otherwise, we're probably faulting in the vmalloc() area, so try to fix
 * that up via vmalloc_fault().
 *
 * If vmalloc_fault() fails, that means the non-leaf page tables did not
 * contain an entry for this address, so handle this via
 * do_kernel_address_page_fault().
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (!user_mode(regs) && vmalloc_fault(addr))
		return 0;

	do_kernel_address_page_fault(current->mm, addr, fsr, regs);

	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	/*
	 * If this is a kernel address, but from user mode, then userspace
	 * is trying bad stuff. Invoke the branch predictor hardening.
	 * Interrupts are disabled here.
	 */
	if (addr >= TASK_SIZE && user_mode(regs))
		harden_branch_predictor();

	do_bad_area(addr, fsr, regs);

	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

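/*
 * Example (illustrative only): platform code can claim an FSR status value
 * from its init path, e.g.:
 *
 *	static int my_external_abort(unsigned long addr, unsigned int fsr,
 *				     struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	hook_fault_code(16 + 6, my_external_abort, SIGBUS, BUS_OBJERR,
 *			"imprecise external abort");
 *
 * A handler returns 0 when it handled the abort; non-zero falls through to
 * the "Unhandled fault" path in do_DataAbort(). The handler name and status
 * number above are hypothetical; the valid numbers are defined by the
 * fsr-2level/fsr-3level tables included above.
 */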
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
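	/* fsr_fs() extracts the fault status field used to index fsr_info[]. */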
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(KERN_ALERT, current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

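	/* FSR_LNX_PF is a Linux-internal flag marking exec/prefetch aborts. */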
	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

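/*
 * Temporarily install the handler, unmask aborts (taking any pending one
 * immediately), then restore the default "unhandled" handler.
 */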
void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults were introduced in ARMv6K.
		 * A runtime check for the 'K' extension is needed.
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif