/*-
 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/pcb.h>

#ifdef KDB
#include <sys/kdb.h>
#include <machine/db_machdep.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

extern char cachebailout[];

struct ksig {
	int sig;
	u_long code;
	vm_offset_t	addr;
};

typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

struct abort {
	abort_func_t	*func;
	const char	*desc;
};
/*
 * How are the aborts handled?
 *
 * Undefined Code:
 *  - Always fatal as we do not know what it means.
 * Imprecise External Abort:
 *  - Always fatal, but could be handled somehow in the future.
 *    For now, ignored due to buggy PCIe hardware.
 * Precise External Abort:
 *  - Always fatal, but who knows in the future???
 * Debug Event:
 *  - Special handling.
 * External Translation Abort (L1 & L2):
 *  - Always fatal as something is screwed up in page tables or hardware.
 * Domain Fault (L1 & L2):
 *  - Always fatal as we do not play games with domains.
 * Alignment Fault:
 *  - Everything should be aligned in the kernel with the exception of user
 *    to kernel and vice versa data copying, so if pcb_onfault is not set,
 *    it's fatal.  We generate a signal in case of an abort from user mode.
 * Instruction cache maintenance:
 *  - According to the manual, this is a translation fault during a cache
 *    maintenance operation.  It could be really complex in the SMP case and
 *    fuzzy too for cache operations working on virtual addresses, so for
 *    now we consider this abort fatal.  In fact, no cache maintenance
 *    should ever be called on an unmapped virtual address.  As cache
 *    maintenance operations (except DMB, DSB, and Flush Prefetch Buffer)
 *    are privileged, the abort is fatal for user mode as well for now.
 *    (This is a good place to note that cache maintenance on a virtual
 *    address fills the TLB.)
 * Access Bit (L1 & L2):
 *  - Fast hardware emulation for kernel and user mode.
 * Translation Fault (L1 & L2):
 *  - The standard fault mechanism is applied, including vm_fault().
 * Permission Fault (L1 & L2):
 *  - Fast hardware emulation of modify bits; in other cases, the standard
 *    fault mechanism is applied, including vm_fault().
 */
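
/*
 * Illustrative sketch only: the table below is indexed by the 5-bit
 * fault status that FSR_TO_FAULT() extracts from the FSR.  In the
 * short-descriptor format, FS[3:0] lives in FSR bits [3:0] and FS[4]
 * in FSR bit [10]; a decode along these lines is assumed here (the
 * authoritative macro lives in the machine headers):
 */
#if 0
static __inline u_int
fsr_to_fault_sketch(uint32_t fsr)
{

	/* Merge FS[3:0] (bits 3:0) with FS[4] (bit 10 moved to bit 4). */
	return ((fsr & 0xF) | ((fsr >> 6) & 0x10));
}
#endif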

static const struct abort aborts[] = {
	{abort_fatal,	"Undefined Code (0x000)"},
	{abort_align,	"Alignment Fault"},
	{abort_fatal,	"Debug Event"},
	{NULL,		"Access Bit (L1)"},
	{NULL,		"Instruction cache maintenance"},
	{NULL,		"Translation Fault (L1)"},
	{NULL,		"Access Bit (L2)"},
	{NULL,		"Translation Fault (L2)"},

	{abort_fatal,	"External Abort"},
	{abort_fatal,	"Domain Fault (L1)"},
	{abort_fatal,	"Undefined Code (0x00A)"},
	{abort_fatal,	"Domain Fault (L2)"},
	{abort_fatal,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (L1)"},
	{abort_fatal,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (L2)"},

	{abort_fatal,	"TLB Conflict Abort"},
	{abort_fatal,	"Undefined Code (0x401)"},
	{abort_fatal,	"Undefined Code (0x402)"},
	{abort_fatal,	"Undefined Code (0x403)"},
	{abort_fatal,	"Undefined Code (0x404)"},
	{abort_fatal,	"Undefined Code (0x405)"},
	{abort_fatal,	"Asynchronous External Abort"},
	{abort_fatal,	"Undefined Code (0x407)"},

	{abort_fatal,	"Asynchronous Parity Error on Memory Access"},
	{abort_fatal,	"Parity Error on Memory Access"},
	{abort_fatal,	"Undefined Code (0x40A)"},
	{abort_fatal,	"Undefined Code (0x40B)"},
	{abort_fatal,	"Parity Error on Translation (L1)"},
	{abort_fatal,	"Undefined Code (0x40D)"},
	{abort_fatal,	"Parity Error on Translation (L2)"},
	{abort_fatal,	"Undefined Code (0x40F)"}
};

static __inline void
call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr,
    int trapno)
{
	ksiginfo_t ksi;

	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
	    __func__, addr, sig, code);

	/*
	 * TODO: it would be nice to know whether we are serving
	 * a data or a prefetch abort.
	 */

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = code;
	ksi.ksi_addr = (void *)addr;
	ksi.ksi_trapno = trapno;
	trapsignal(td, &ksi);
}

/*
 * abort_imprecise() handles the following abort:
 *
 *  FAULT_EA_IMPREC - Imprecise External Abort
 *
 * Imprecise means that we do not know where the abort happened,
 * and thus FAR is undefined.  This abort should never fire, but hot
 * plugging or accidental hardware failure can cause it.  If the abort
 * happens, it can even happen in a different (thread) context.
 * Without additional support, the abort is fatal, as we do not
 * know what really happened.
 *
 * QQQ: Some additional functionality, like pcb_onfault but global,
 *      could be implemented.  Imprecise handlers could be registered
 *      which tell us whether the abort is caused by something they know
 *      about.  They should return one of three codes:
 *		FAULT_IS_MINE,
 *		FAULT_CAN_BE_MINE,
 *		FAULT_IS_NOT_MINE.
 *      The handlers should be called until one of them returns
 *      FAULT_IS_MINE or all have been called.  If all handlers return
 *	FAULT_IS_NOT_MINE, then the abort is fatal.
 */
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
{

	/*
	 * XXX - We can get an imprecise abort as a result of an access
	 * to a not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for a good reason.  Note that an imprecise abort
	 * could fire at any time, even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}
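
/*
 * Illustrative sketch only: one possible shape for the imprecise-abort
 * handler registration proposed in the QQQ above.  The names
 * (imprecise_handler_t, imprecise_handlers, abort_imprecise_dispatch)
 * are hypothetical, not an existing kernel API.
 */
#if 0
#define	FAULT_IS_MINE		0
#define	FAULT_CAN_BE_MINE	1
#define	FAULT_IS_NOT_MINE	2

typedef int imprecise_handler_t(struct trapframe *, u_int);

static imprecise_handler_t *imprecise_handlers[8];

static bool
abort_imprecise_dispatch(struct trapframe *tf, u_int fsr)
{
	u_int i;

	/* Call handlers until one claims the abort or all were called. */
	for (i = 0; i < nitems(imprecise_handlers); i++) {
		if (imprecise_handlers[i] != NULL &&
		    (imprecise_handlers[i])(tf, fsr) == FAULT_IS_MINE)
			return (true);	/* Handled; not fatal. */
	}
	return (false);			/* Nobody claimed it; fatal. */
}
#endif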

/*
 * abort_debug() handles the following abort:
 *
 *  FAULT_DEBUG - Debug Event
 */
static __inline void
abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
    u_int far)
{

	if (usermode) {
		struct thread *td;

		td = curthread;
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far, FAULT_DEBUG);
		userret(td, tf);
	} else {
#ifdef KDB
		kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
#else
		printf("No debugger in kernel.\n");
#endif
	}
}

/*
 * Abort handler.
 *
 * FAR, FSR, and everything else that can be lost after enabling
 * interrupts must be grabbed before interrupts are enabled.  Note
 * that once interrupts are enabled, we could even migrate to another
 * CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
	int bp_harden, ucode;
#ifdef INVARIANTS
	void *onfault;
#endif

	VM_CNT_INC(v_trap);
	td = curthread;

	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();

	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */

	/*
	 * Apply BP hardening by flushing the branch prediction cache
	 * for prefetch aborts on kernel addresses.
	 */
	if (__predict_false(prefetch && far > VM_MAXUSER_ADDRESS &&
	    (idx == FAULT_TRAN_L2 || idx == FAULT_PERM_L2))) {
		bp_harden = PCPU_GET(bp_harden_kind);
		if (bp_harden == PCPU_BP_HARDEN_KIND_BPIALL)
			_CP15_BPIALL();
		else if (bp_harden == PCPU_BP_HARDEN_KIND_ICIALLU)
			_CP15_ICIALLU();
	}

	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * First, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in
	 * modes other than user mode, and the OS should recognize their
	 * aborts and behave appropriately.  However, there is no reasonable
	 * way to do that in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()
	 * like functions, where user-mode buffers are checked in advance
	 * to ensure they do not come from KVA space.  Thus, no action is
	 * needed here.
	 */
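
	/*
	 * Illustrative sketch only: the kind of advance check assumed
	 * above, by which a copyin()/copyout() like function rejects a
	 * user buffer ('uva'/'len' are hypothetical names) that would
	 * reach into KVA space before touching it:
	 */
#if 0
	if (uva + len < uva || uva + len > VM_MAXUSER_ADDRESS)
		return (EFAULT);
#endif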

	/*
	 * (1) Handle access and R/W hardware emulation aborts.
	 * (2) Check that the abort did not happen in an essential pmap
	 *     address range.  There is no way to fix it, so we don't
	 *     even try.
	 */
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == KERN_SUCCESS)
		return;
#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (rv == KERN_INVALID_ADDRESS)
		goto nogo;

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;

			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}
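
	/*
	 * Illustrative sketch only: how a caller brackets a "no faulting"
	 * section referred to above ('pmap_probe' is a hypothetical
	 * stand-in for the guarded access):
	 */
#if 0
	int save, error;

	save = vm_fault_disable_pagefaults();
	error = pmap_probe(addr);	/* hypothetical guarded access */
	vm_fault_enable_pagefaults(save);
#endif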

	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
	}

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_ICACHE   - I-cache maintenance
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 */

	/*
	 * Don't pass a faulting cache operation to vm_fault().  We don't
	 * want to handle all the VM stuff at this point.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			ksig.sig = SIGSEGV;
			ksig.code = SEGV_ACCERR;
			goto nogo;
		}

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.  If curproc
		 * is NULL or curproc->p_vmspace is NULL, the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL) {
			ksig.sig = SIGSEGV;
			ksig.code = 0;
			goto nogo;
		}

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL, &ksig.sig,
	    &ucode);
	ksig.code = ucode;

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (register_t)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr, idx);
out:
	if (usermode)
		userret(td, tf);
}

/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;
#ifdef KDB
	bool handled;
#endif

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = (fsr & FSR_WNR) ? "write" : "read";
	disable_interrupts(PSR_I);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_trap) {
		kdb_why = KDB_WHY_TRAP;
		handled = kdb_trap(fsr, 0, tf);
		kdb_why = KDB_WHY_UNSET;
		if (handled)
			return (0);
	}
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}

/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Everything should be aligned in the kernel with the exception of user
 * to kernel and vice versa data copying, so if pcb_onfault is not set,
 * it's fatal.  We generate a signal in case of an abort from user mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process. */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}

/*
 * abort_icache() handles the following data abort:
 *
 * FAULT_ICACHE - Instruction cache maintenance
 *
 * According to the manual, FAULT_ICACHE is a translation fault during a
 * cache maintenance operation.  In fact, no cache maintenance operation
 * should ever be called on an unmapped virtual address.  As cache
 * maintenance operations (except DMB, DSB, and Flush Prefetch Buffer)
 * are privileged, the abort is considered fatal for now.  However,
 * everything related to cache maintenance operations on virtual
 * addresses could be really complex and fuzzy in the SMP case, so maybe
 * in the future the standard fault mechanism should be applied here,
 * including a vm_fault() call.
 */
static int
abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{

	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	return (0);
}