/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#ifdef KDB
#include <sys/kdb.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcpu.h>
#include <machine/undefined.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef KDB
#include <machine/db_machdep.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/* Called from exception.S */
void do_el1h_sync(struct thread *, struct trapframe *);
void do_el0_sync(struct thread *, struct trapframe *);
void do_el0_error(struct trapframe *);
void do_serror(struct trapframe *);
void unhandled_exception(struct trapframe *);

static void print_gp_register(const char *name, uint64_t value);
static void print_registers(struct trapframe *frame);

int (*dtrace_invop_jump_addr)(struct trapframe *);

u_long cnt_efirt_faults;
int print_efirt_faults;

typedef void (abort_handler)(struct thread *, struct trapframe *, uint64_t,
    uint64_t, int);

static abort_handler align_abort;
static abort_handler data_abort;
static abort_handler external_abort;

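/*
 * Abort handlers, indexed by the data fault status code (DFSC) field of the
 * ESR.  Fault codes without an entry here are reported as unhandled aborts.
 */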
static abort_handler *abort_handlers[] = {
    [ISS_DATA_DFSC_TF_L0] = data_abort,
    [ISS_DATA_DFSC_TF_L1] = data_abort,
    [ISS_DATA_DFSC_TF_L2] = data_abort,
    [ISS_DATA_DFSC_TF_L3] = data_abort,
    [ISS_DATA_DFSC_AFF_L1] = data_abort,
    [ISS_DATA_DFSC_AFF_L2] = data_abort,
    [ISS_DATA_DFSC_AFF_L3] = data_abort,
    [ISS_DATA_DFSC_PF_L1] = data_abort,
    [ISS_DATA_DFSC_PF_L2] = data_abort,
    [ISS_DATA_DFSC_PF_L3] = data_abort,
    [ISS_DATA_DFSC_ALIGN] = align_abort,
    [ISS_DATA_DFSC_EXT] = external_abort,
    [ISS_DATA_DFSC_EXT_L0] = external_abort,
    [ISS_DATA_DFSC_EXT_L1] = external_abort,
    [ISS_DATA_DFSC_EXT_L2] = external_abort,
    [ISS_DATA_DFSC_EXT_L3] = external_abort,
    [ISS_DATA_DFSC_ECC] = external_abort,
    [ISS_DATA_DFSC_ECC_L0] = external_abort,
    [ISS_DATA_DFSC_ECC_L1] = external_abort,
    [ISS_DATA_DFSC_ECC_L2] = external_abort,
    [ISS_DATA_DFSC_ECC_L3] = external_abort,
};

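/*
 * Post a trap-related signal to the thread, recording the signal number,
 * si_code, faulting address, and trap number in the siginfo.
 */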
static __inline void
call_trapsignal(struct thread *td, int sig, int code, void *addr, int trapno)
{
    ksiginfo_t ksi;

    ksiginfo_init_trap(&ksi);
    ksi.ksi_signo = sig;
    ksi.ksi_code = code;
    ksi.ksi_addr = addr;
    ksi.ksi_trapno = trapno;
    trapsignal(td, &ksi);
}

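/*
 * Fetch the current thread's system call arguments from the trapframe.
 * The system call number arrives in x8 and the arguments in x0 and up; for
 * the indirect SYS_syscall/SYS___syscall entry points the number is instead
 * taken from x0 and the remaining arguments shift down by one register.
 */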
int
cpu_fetch_syscall_args(struct thread *td)
{
    struct proc *p;
    syscallarg_t *ap, *dst_ap;
    struct syscall_args *sa;

    p = td->td_proc;
    sa = &td->td_sa;
    ap = td->td_frame->tf_x;
    dst_ap = &sa->args[0];

    sa->code = td->td_frame->tf_x[8];
    sa->original_code = sa->code;

    if (__predict_false(sa->code == SYS_syscall ||
        sa->code == SYS___syscall)) {
        sa->code = *ap++;
    } else {
        *dst_ap++ = *ap++;
    }

    if (__predict_false(sa->code >= p->p_sysent->sv_size))
        sa->callp = &nosys_sysent;
    else
        sa->callp = &p->p_sysent->sv_table[sa->code];

    KASSERT(sa->callp->sy_narg <= nitems(sa->args),
        ("Syscall %d takes too many arguments", sa->code));

    memcpy(dst_ap, ap, (nitems(sa->args) - 1) * sizeof(*dst_ap));

    td->td_retval[0] = 0;
    td->td_retval[1] = 0;

    return (0);
}

#include "../../kern/subr_syscall.c"

/*
 * Test for a fault generated by the given access instruction in a
 * bus_peek_<foo> or bus_poke_<foo> bus function.
 */
extern uint32_t generic_bs_peek_1f, generic_bs_peek_2f;
extern uint32_t generic_bs_peek_4f, generic_bs_peek_8f;
extern uint32_t generic_bs_poke_1f, generic_bs_poke_2f;
extern uint32_t generic_bs_poke_4f, generic_bs_poke_8f;

static bool
test_bs_fault(void *addr)
{
    return (addr == &generic_bs_peek_1f ||
        addr == &generic_bs_peek_2f ||
        addr == &generic_bs_peek_4f ||
        addr == &generic_bs_peek_8f ||
        addr == &generic_bs_poke_1f ||
        addr == &generic_bs_poke_2f ||
        addr == &generic_bs_poke_4f ||
        addr == &generic_bs_poke_8f);
}

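/*
 * Handle an SVC (supervisor call) exception from userspace.  An immediate of
 * zero is the system call trap; any other immediate is not a valid system
 * call and is reported to the process as SIGILL.
 */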
static void
svc_handler(struct thread *td, struct trapframe *frame)
{

    if ((frame->tf_esr & ESR_ELx_ISS_MASK) == 0) {
        syscallenter(td);
        syscallret(td);
    } else {
        call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
            ESR_ELx_EXCEPTION(frame->tf_esr));
        userret(td, frame);
    }
}

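/*
 * Alignment faults are fatal when taken from the kernel; from userspace they
 * are delivered to the process as SIGBUS with BUS_ADRALN.
 */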
static void
align_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
    if (!lower) {
        print_registers(frame);
        print_gp_register("far", far);
        printf(" esr: 0x%.16lx\n", esr);
        panic("Misaligned access from kernel space!");
    }

    call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
        ESR_ELx_EXCEPTION(frame->tf_esr));
    userret(td, frame);
}

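/*
 * Synchronous external aborts from userspace are delivered as SIGBUS.  In
 * the kernel, aborts raised by the bus_space peek/poke accessors are
 * recovered by redirecting to generic_bs_fault; anything else is fatal.
 */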
static void
external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
    if (lower) {
        call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)far,
            ESR_ELx_EXCEPTION(frame->tf_esr));
        userret(td, frame);
        return;
    }

    /*
     * Try to handle synchronous external aborts caused by
     * bus_space_peek() and/or bus_space_poke() functions.
     */
    if (test_bs_fault((void *)frame->tf_elr)) {
        frame->tf_elr = (uint64_t)generic_bs_fault;
        return;
    }

    print_registers(frame);
    print_gp_register("far", far);
    panic("Unhandled external data abort");
}

/*
 * It is unsafe to access the stack canary value stored in "td" until kernel
 * map translation faults are handled; see the pmap_klookup() call below.
 * Thus, stack-smashing detection with per-thread canaries must be disabled
 * in this function.
 */
static void NO_PERTHREAD_SSP
data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
    uint64_t far, int lower)
{
    struct vm_map *map;
    struct pcb *pcb;
    vm_prot_t ftype;
    int error, sig, ucode;
#ifdef KDB
    bool handled;
#endif

    /*
     * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive
     * and Store-Exclusive instruction usage restrictions", the state
     * of the exclusive monitors after a data abort exception is unknown.
     */
    clrex();

#ifdef KDB
    if (kdb_active) {
        kdb_reenter();
        return;
    }
#endif

    if (lower) {
        map = &td->td_proc->p_vmspace->vm_map;
    } else if (!ADDR_IS_CANONICAL(far)) {
        /* We received a TBI/PAC/etc. fault from the kernel */
        error = KERN_INVALID_ADDRESS;
        pcb = td->td_pcb;
        goto bad_far;
    } else if (ADDR_IS_KERNEL(far)) {
        /*
         * Handle a special case: the data abort was caused by accessing
         * a thread structure while its mapping was being promoted or
         * demoted, as a consequence of the break-before-make rule.  It
         * is not safe to enable interrupts or dereference "td" before
         * this case is handled.
         *
         * In principle, if pmap_klookup() fails, there is no need to
         * call pmap_fault() below, but avoiding that call is not worth
         * the effort.
         */
        if (ESR_ELx_EXCEPTION(esr) == EXCP_DATA_ABORT) {
            switch (esr & ISS_DATA_DFSC_MASK) {
            case ISS_DATA_DFSC_TF_L0:
            case ISS_DATA_DFSC_TF_L1:
            case ISS_DATA_DFSC_TF_L2:
            case ISS_DATA_DFSC_TF_L3:
                if (pmap_klookup(far, NULL))
                    return;
                break;
            }
        }
        if (td->td_md.md_spinlock_count == 0 &&
            (frame->tf_spsr & PSR_DAIF_INTR) != PSR_DAIF_INTR) {
            MPASS((frame->tf_spsr & PSR_DAIF_INTR) == 0);
            intr_enable();
        }
        map = kernel_map;
    } else {
        if (td->td_md.md_spinlock_count == 0 &&
            (frame->tf_spsr & PSR_DAIF_INTR) != PSR_DAIF_INTR) {
            MPASS((frame->tf_spsr & PSR_DAIF_INTR) == 0);
            intr_enable();
        }
        map = &td->td_proc->p_vmspace->vm_map;
        if (map == NULL)
            map = kernel_map;
    }
    pcb = td->td_pcb;

    /*
     * Try to handle translation, access flag, and permission faults.
     * Translation faults may occur as a result of the required
     * break-before-make sequence used when promoting or demoting
     * superpages.  Such faults must not occur while holding the pmap lock,
     * or pmap_fault() will recurse on that lock.
     */
    if ((lower || map == kernel_map || pcb->pcb_onfault != 0) &&
        pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
        return;

#ifdef INVARIANTS
    if (td->td_md.md_spinlock_count != 0) {
        print_registers(frame);
        print_gp_register("far", far);
        printf(" esr: 0x%.16lx\n", esr);
        panic("data abort with spinlock held (spinlock count %d != 0)",
            td->td_md.md_spinlock_count);
    }
#endif
    if ((td->td_pflags & TDP_NOFAULTING) == 0 &&
        (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
        WARN_GIANTOK, NULL, "Kernel page fault") != 0)) {
        print_registers(frame);
        print_gp_register("far", far);
        printf(" esr: 0x%.16lx\n", esr);
        panic("data abort in critical section or under mutex");
    }

    switch (ESR_ELx_EXCEPTION(esr)) {
    case EXCP_INSN_ABORT:
    case EXCP_INSN_ABORT_L:
        ftype = VM_PROT_EXECUTE;
        break;
    default:
        /*
         * If the exception was caused by a read or a cache operation,
         * pass a read fault type into the vm code.  Cache operations
         * need read permission but will set the WnR flag when the
         * memory is unmapped.
         */
        if ((esr & ISS_DATA_WnR) == 0 || (esr & ISS_DATA_CM) != 0)
            ftype = VM_PROT_READ;
        else
            ftype = VM_PROT_WRITE;
        break;
    }

    /* Fault in the page. */
    error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
    if (error != KERN_SUCCESS) {
        if (lower) {
            call_trapsignal(td, sig, ucode, (void *)far,
                ESR_ELx_EXCEPTION(esr));
        } else {
bad_far:
            if (td->td_intr_nesting_level == 0 &&
                pcb->pcb_onfault != 0) {
                frame->tf_elr = pcb->pcb_onfault;
                return;
            }

            printf("Fatal data abort:\n");
            print_registers(frame);
            print_gp_register("far", far);
            printf(" esr: 0x%.16lx\n", esr);

#ifdef KDB
            if (debugger_on_trap) {
                kdb_why = KDB_WHY_TRAP;
                handled = kdb_trap(ESR_ELx_EXCEPTION(esr), 0,
                    frame);
                kdb_why = KDB_WHY_UNSET;
                if (handled)
                    return;
            }
#endif
            panic("vm_fault failed: 0x%lx error %d",
                frame->tf_elr, error);
        }
    }

    if (lower)
        userret(td, frame);
}

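/*
 * Print a single general-purpose register.  When DDB is compiled in, values
 * that look like kernel addresses are annotated with the nearest symbol name
 * and offset.
 */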
static void
print_gp_register(const char *name, uint64_t value)
{
#if defined(DDB)
    c_db_sym_t sym;
    const char *sym_name;
    db_expr_t sym_value;
    db_expr_t offset;
#endif

    printf(" %s: 0x%.16lx", name, value);
#if defined(DDB)
    /* If this looks like a kernel address, try to find the symbol */
    if (value >= VM_MIN_KERNEL_ADDRESS) {
        sym = db_search_symbol(value, DB_STGY_ANY, &offset);
        if (sym != C_DB_SYM_NULL) {
            db_symbol_values(sym, &sym_name, &sym_value);
            printf(" (%s + 0x%lx)", sym_name, offset);
        }
    }
#endif
    printf("\n");
}

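/*
 * Dump the general-purpose registers, stack pointer, link register,
 * exception link register, and spsr from the trapframe.
 */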
static void
print_registers(struct trapframe *frame)
{
    char name[4];
    u_int reg;

    for (reg = 0; reg < nitems(frame->tf_x); reg++) {
        snprintf(name, sizeof(name), "%sx%d", (reg < 10) ? " " : "",
            reg);
        print_gp_register(name, frame->tf_x[reg]);
    }
    printf(" sp: 0x%.16lx\n", frame->tf_sp);
    print_gp_register(" lr", frame->tf_lr);
    print_gp_register("elr", frame->tf_elr);
    printf("spsr: 0x%.16lx\n", frame->tf_spsr);
}

#ifdef VFP
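/*
 * Convert a trapped floating-point exception into the matching FPE_* si_code.
 * The IOF/DZF/OFF/UFF/IXF bits are only examined when the TFV bit is set;
 * otherwise the code defaults to FPE_FLTIDO.
 */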
static void
fpe_trap(struct thread *td, void *addr, uint32_t exception)
{
    int code;

    code = FPE_FLTIDO;
    if ((exception & ISS_FP_TFV) != 0) {
        if ((exception & ISS_FP_IOF) != 0)
            code = FPE_FLTINV;
        else if ((exception & ISS_FP_DZF) != 0)
            code = FPE_FLTDIV;
        else if ((exception & ISS_FP_OFF) != 0)
            code = FPE_FLTOVF;
        else if ((exception & ISS_FP_UFF) != 0)
            code = FPE_FLTUND;
        else if ((exception & ISS_FP_IXF) != 0)
            code = FPE_FLTRES;
    }
    call_trapsignal(td, SIGFPE, code, addr, exception);
}
#endif

/*
 * See the comment above data_abort().
 */
void NO_PERTHREAD_SSP
do_el1h_sync(struct thread *td, struct trapframe *frame)
{
    uint32_t exception;
    uint64_t esr, far;
    int dfsc;

    kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
    kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

    far = frame->tf_far;
    /* Read the esr register to get the exception details */
    esr = frame->tf_esr;
    exception = ESR_ELx_EXCEPTION(esr);

#ifdef KDTRACE_HOOKS
    if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception))
        return;
#endif

    CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
        __func__, exception, frame->tf_elr, esr);

    /*
     * Enable debug exceptions if we aren't already handling one.  They will
     * be masked again in the exception handler's epilogue.
     */
    switch (exception) {
    case EXCP_BRK:
    case EXCP_BRKPT_EL1:
    case EXCP_WATCHPT_EL1:
    case EXCP_SOFTSTP_EL1:
        break;
    default:
        dbg_enable();
        break;
    }

    switch (exception) {
    case EXCP_FP_SIMD:
    case EXCP_TRAP_FP:
#ifdef VFP
        if ((td->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) {
            vfp_restore_state();
        } else
#endif
        {
            print_registers(frame);
            printf(" esr: 0x%.16lx\n", esr);
            panic("VFP exception in the kernel");
        }
        break;
    case EXCP_INSN_ABORT:
    case EXCP_DATA_ABORT:
        dfsc = esr & ISS_DATA_DFSC_MASK;
        if (dfsc < nitems(abort_handlers) &&
            abort_handlers[dfsc] != NULL) {
            abort_handlers[dfsc](td, frame, esr, far, 0);
        } else {
            print_registers(frame);
            print_gp_register("far", far);
            printf(" esr: 0x%.16lx\n", esr);
            panic("Unhandled EL1 %s abort: 0x%x",
                exception == EXCP_INSN_ABORT ? "instruction" :
                "data", dfsc);
        }
        break;
    case EXCP_BRK:
#ifdef KDTRACE_HOOKS
        if ((esr & ESR_ELx_ISS_MASK) == 0x40d /* BRK_IMM16_VAL */ &&
            dtrace_invop_jump_addr != NULL &&
            dtrace_invop_jump_addr(frame) == 0)
            break;
#endif
#ifdef KDB
        kdb_trap(exception, 0, frame);
#else
        panic("No debugger in kernel.");
#endif
        break;
    case EXCP_BRKPT_EL1:
    case EXCP_WATCHPT_EL1:
    case EXCP_SOFTSTP_EL1:
#ifdef KDB
        kdb_trap(exception, 0, frame);
#else
        panic("No debugger in kernel.");
#endif
        break;
    case EXCP_FPAC:
        /* We can see this if the authentication on PAC fails */
        print_registers(frame);
        print_gp_register("far", far);
        panic("FPAC kernel exception");
        break;
    case EXCP_UNKNOWN:
        print_registers(frame);
        print_gp_register("far", far);
        panic("Undefined instruction: %08x",
            *(uint32_t *)frame->tf_elr);
        break;
    case EXCP_BTI:
        print_registers(frame);
        print_gp_register("far", far);
        panic("Branch Target exception");
        break;
    default:
        print_registers(frame);
        print_gp_register("far", far);
        panic("Unknown kernel exception 0x%x esr_el1 0x%lx", exception,
            esr);
    }
}

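/*
 * Handle a synchronous exception taken from userspace (EL0).  Most cases
 * either fix up the thread state or deliver a signal, then return to
 * userspace via userret().
 */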
void
do_el0_sync(struct thread *td, struct trapframe *frame)
{
    pcpu_bp_harden bp_harden;
    uint32_t exception;
    uint64_t esr, far;
    int dfsc;

    /* Check we have a sane environment when entering from userland */
    KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
        ("Invalid pcpu address from userland: %p (tpidr 0x%lx)",
        get_pcpu(), READ_SPECIALREG(tpidr_el1)));

    kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
    kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

    far = frame->tf_far;
    esr = frame->tf_esr;
    exception = ESR_ELx_EXCEPTION(esr);
    if (exception == EXCP_INSN_ABORT_L && far > VM_MAXUSER_ADDRESS) {
        /*
         * Userspace may be trying to train the branch predictor to
         * attack the kernel.  If we are on a CPU affected by this,
         * call the handler to clear the branch predictor state.
         */
        bp_harden = PCPU_GET(bp_harden);
        if (bp_harden != NULL)
            bp_harden();
    }
    intr_enable();

    CTR4(KTR_TRAP, "%s: exception=%lu, elr=0x%lx, esr=0x%lx",
        __func__, exception, frame->tf_elr, esr);

    switch (exception) {
    case EXCP_FP_SIMD:
#ifdef VFP
        vfp_restore_state();
#else
        panic("VFP exception in userland");
#endif
        break;
    case EXCP_TRAP_FP:
#ifdef VFP
        fpe_trap(td, (void *)frame->tf_elr, esr);
        userret(td, frame);
#else
        panic("VFP exception in userland");
#endif
        break;
    case EXCP_SVE:
        /* Returns true if this thread can use SVE */
        if (!sve_restore_state(td))
            call_trapsignal(td, SIGILL, ILL_ILLTRP,
                (void *)frame->tf_elr, exception);
        userret(td, frame);
        break;
    case EXCP_SVC32:
    case EXCP_SVC64:
        svc_handler(td, frame);
        break;
    case EXCP_INSN_ABORT_L:
    case EXCP_DATA_ABORT_L:
    case EXCP_DATA_ABORT:
        dfsc = esr & ISS_DATA_DFSC_MASK;
        if (dfsc < nitems(abort_handlers) &&
            abort_handlers[dfsc] != NULL)
            abort_handlers[dfsc](td, frame, esr, far, 1);
        else {
            print_registers(frame);
            print_gp_register("far", far);
            printf(" esr: 0x%.16lx\n", esr);
            panic("Unhandled EL0 %s abort: 0x%x",
                exception == EXCP_INSN_ABORT_L ? "instruction" :
                "data", dfsc);
        }
        break;
    case EXCP_UNKNOWN:
        if (!undef_insn(frame))
            call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far,
                exception);
        userret(td, frame);
        break;
    case EXCP_FPAC:
        call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
            exception);
        userret(td, frame);
        break;
    case EXCP_SP_ALIGN:
        call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
            exception);
        userret(td, frame);
        break;
    case EXCP_PC_ALIGN:
        call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr,
            exception);
        userret(td, frame);
        break;
    case EXCP_BRKPT_EL0:
    case EXCP_BRK:
#ifdef COMPAT_FREEBSD32
    case EXCP_BRKPT_32:
#endif /* COMPAT_FREEBSD32 */
        call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr,
            exception);
        userret(td, frame);
        break;
    case EXCP_WATCHPT_EL0:
        call_trapsignal(td, SIGTRAP, TRAP_TRACE, (void *)far,
            exception);
        userret(td, frame);
        break;
    case EXCP_MSR:
        /*
         * The CPU can raise EXCP_MSR when userspace executes an mrs
         * instruction to access a special register that userspace
         * doesn't have access to.
         */
        if (!undef_insn(frame))
            call_trapsignal(td, SIGILL, ILL_PRVOPC,
                (void *)frame->tf_elr, exception);
        userret(td, frame);
        break;
    case EXCP_SOFTSTP_EL0:
        PROC_LOCK(td->td_proc);
        if ((td->td_dbgflags & TDB_STEP) != 0) {
            td->td_frame->tf_spsr &= ~PSR_SS;
            td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
            WRITE_SPECIALREG(mdscr_el1,
                READ_SPECIALREG(mdscr_el1) & ~MDSCR_SS);
        }
        PROC_UNLOCK(td->td_proc);
        call_trapsignal(td, SIGTRAP, TRAP_TRACE,
            (void *)frame->tf_elr, exception);
        userret(td, frame);
        break;
    case EXCP_BTI:
        call_trapsignal(td, SIGILL, ILL_ILLOPC, (void *)frame->tf_elr,
            exception);
        userret(td, frame);
        break;
    default:
        call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr,
            exception);
        userret(td, frame);
        break;
    }

    KASSERT(
        (td->td_pcb->pcb_fpflags & ~(PCB_FP_USERMASK|PCB_FP_SVEVALID)) == 0,
        ("Kernel VFP flags set while entering userspace"));
    KASSERT(
        td->td_pcb->pcb_fpusaved == &td->td_pcb->pcb_fpustate,
        ("Kernel VFP state in use when entering userspace"));
}

/*
 * TODO: We will need to handle these later when we support ARMv8.2 RAS.
 */
void
do_serror(struct trapframe *frame)
{
    uint64_t esr, far;

    kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
    kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

    far = frame->tf_far;
    esr = frame->tf_esr;

    print_registers(frame);
    print_gp_register("far", far);
    printf(" esr: 0x%.16lx\n", esr);
    panic("Unhandled System Error");
}

void
unhandled_exception(struct trapframe *frame)
{
    uint64_t esr, far;

    kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
    kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

    far = frame->tf_far;
    esr = frame->tf_esr;

    print_registers(frame);
    print_gp_register("far", far);
    printf(" esr: 0x%.16lx\n", esr);
    panic("Unhandled exception");
}