/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>

/*
 * Machine specific interrupt handlers
 */

static void do_illegal_instruction(struct pt_regs *regs);
static void do_div0(struct pt_regs *regs);
static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs);
#endif
static void do_unaligned_user(struct pt_regs *regs);
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs);
#endif
static void do_debug(struct pt_regs *regs);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL		0x01
#define USER		0x02

#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }

typedef struct {
	int cause;
	int fast;
	void *handler;
} dispatch_init_table_t;
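
/*
 * For reference, each COPROCESSOR(x) use in the table below expands to a
 * pair of entries: a fast handler shared by user and kernel mode, and the
 * default C handler (fast == 0). For example, COPROCESSOR(0) becomes:
 *
 *   { EXCCAUSE_COPROCESSOR0_DISABLED, USER|KRNL, fast_coprocessor },
 *   { EXCCAUSE_COPROCESSOR0_DISABLED, 0,         do_coprocessor }
 */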
static dispatch_init_table_t __initdata dispatch_init_table[] = {

#ifdef CONFIG_USER_ABI_CALL0_PROBE
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	USER,	   fast_illegal_instruction_user },
#endif
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
#ifdef CONFIG_XTENSA_LOAD_STORE
{ EXCCAUSE_LOAD_STORE_ERROR,	USER|KRNL, fast_load_store },
{ EXCCAUSE_LOAD_STORE_ERROR,	0,	   do_load_store },
#endif
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
#endif
{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
#endif /* CONFIG_MMU */
#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,	0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0,	   do_page_fault },
#endif
/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI,		0,	   do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG,	0,	   do_debug },
{ -1, -1, 0 }

};

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);

void die(const char*, struct pt_regs*, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

#ifdef CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION
static inline void dump_user_code(struct pt_regs *regs)
{
	char buf[32];

	if (copy_from_user(buf, (void __user *)(regs->pc & -16), sizeof(buf)) == 0) {
		print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
			       32, 1, buf, sizeof(buf), false);
	}
}
#else
static inline void dump_user_code(struct pt_regs *regs)
{
}
#endif

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	pr_info_ratelimited("Caught unhandled exception in '%s' "
			    "(pid = %d, pc = %#010lx) - should not happen\n"
			    "\tEXCCAUSE is %ld\n",
			    current->comm, task_pid_nr(current), regs->pc,
			    regs->exccause);
	dump_user_code(regs);
	force_sig(SIGILL);
}

/*
 * Multi-hit exception. This is fatal!
 */

static void do_multihit(struct pt_regs *regs)
{
	die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

#if XTENSA_FAKE_NMI

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fires."

static inline void check_valid_nmi(void)
{
	unsigned intread = xtensa_get_sr(interrupt);
	unsigned intenable = xtensa_get_sr(intenable);

	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

static void do_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	set_irq_regs(old_regs);
}
#endif
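
/*
 * Dispatch pending level-1..LOCKLEVEL interrupts to do_IRQ(), one at a
 * time. The loop below always services the highest interrupt level that
 * has a pending, enabled IRQ; within a level, IRQs already serviced in
 * this invocation are masked out of <unhandled> so the remaining ones
 * get a turn before any IRQ is serviced a second time. The loop ends
 * when no pending, enabled interrupt is left (level == 0).
 */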
static void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned unhandled = ~0u;

	irq_enter();

	for (;;) {
		unsigned intread = xtensa_get_sr(interrupt);
		unsigned intenable = xtensa_get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				if (int_at_level & unhandled)
					int_at_level &= unhandled;
				else
					unhandled |= int_level_mask[level];
				break;
			}
		}

		if (level == 0)
			break;

		/* clear lowest pending irq in the unhandled mask */
		unhandled ^= (int_at_level & -int_at_level);
		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
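
/*
 * Check whether an illegal instruction exception was really a division
 * by zero. This presumably relies on the integer division helpers
 * marking their divide-by-zero trap with the ASCII bytes "DIV0" right
 * after the trapping instruction; since that instruction may be either
 * 2 or 3 bytes long, the pattern is matched at both pc + 2 and pc + 3.
 */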
static bool check_div0(struct pt_regs *regs)
{
	static const u8 pattern[] = {'D', 'I', 'V', '0'};
	const u8 *p;
	u8 buf[5];

	if (user_mode(regs)) {
		if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
			return false;
		p = buf;
	} else {
		p = (const u8 *)regs->pc + 2;
	}

	return memcmp(p, pattern, sizeof(pattern)) == 0 ||
		memcmp(p + 1, pattern, sizeof(pattern)) == 0;
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

static void do_illegal_instruction(struct pt_regs *regs)
{
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	/*
	 * When a call0 application encounters an illegal instruction, the
	 * fast exception handler will attempt to set PS.WOE and retry the
	 * failing instruction.
	 * If we get here we know that the instruction is also illegal
	 * with PS.WOE set, so it's not related to the windowed option,
	 * hence PS.WOE may be cleared.
	 */
	if (regs->pc == current_thread_info()->ps_woe_fix_addr)
		regs->ps &= ~PS_WOE_MASK;
#endif
	if (check_div0(regs)) {
		do_div0(regs);
		return;
	}

	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
			    current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL);
}

static void do_div0(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
	force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}

#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled load/store exception in kernel",
			regs, SIGKILL);

	pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr);
}
#endif

/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

static void do_unaligned_user(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
			    "(pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void *)regs->excvaddr);
}

#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
{
	coprocessor_flush_release_all(current_thread_info());
}
#endif

/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
 * preemption disabled to avoid rescheduling and keep mapping of hardware
 * breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
 */
static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);

	preempt_enable();
	if (ret == 0)
		return;
#endif
	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP);
}
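
/*
 * Install <handler> for exception <cause> in the named per-CPU dispatch
 * table (fast_user_handler, fast_kernel_handler or default_handler) on
 * every possible CPU.
 */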
#define set_handler(type, cause, handler)				\
	do {								\
		unsigned int cpu;					\
									\
		for_each_possible_cpu(cpu)				\
			per_cpu(exc_table, cpu).type[cause] = (handler);\
	} while (0)

/* Set exception C handler - for temporary use when probing exceptions */

xtensa_exception_handler *
__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
	void *previous = per_cpu(exc_table, 0).default_handler[cause];

	set_handler(default_handler, cause, handler);
	return previous;
}


static void trap_init_excsave(void)
{
	xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}

static void trap_init_debug(void)
{
	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
			     :: "a"(debugsave));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user		first dispatch table for user exceptions
 * - fast kernel	first dispatch table for kernel exceptions
 * - default C-handler	C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < EXCCAUSE_N; i++) {
		set_handler(fast_user_handler, i, user_exception);
		set_handler(fast_kernel_handler, i, kernel_exception);
		set_handler(default_handler, i, do_unhandled);
	}

	/* Setup specific handlers. */

	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		if (fast == 0)
			set_handler(default_handler, cause, handler);
		if ((fast & USER) != 0)
			set_handler(fast_user_handler, cause, handler);
		if ((fast & KRNL) != 0)
			set_handler(fast_kernel_handler, cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
	trap_init_debug();
}

#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
	trap_init_excsave();
	trap_init_debug();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);

	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			pr_info("a%02d:", i);
		pr_cont(" %08lx", regs->areg[i]);
	}
	pr_cont("\n");
	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
		regs->pc, regs->ps, regs->depc, regs->excvaddr);
	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
		regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
			regs->windowbase, regs->windowstart, regs->wmask,
			regs->syscall);
}

static int show_trace_cb(struct stackframe *frame, void *data)
{
	const char *loglvl = data;

	if (kernel_text_address(frame->pc))
		printk("%s [<%08lx>] %pB\n",
		       loglvl, frame->pc, (void *)frame->pc);
	return 0;
}

static void show_trace(struct task_struct *task, unsigned long *sp,
		       const char *loglvl)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("%sCall Trace:\n", loglvl);
	walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}
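
/*
 * Dump the kernel stack in STACK_DUMP_LINE_SIZE-byte lines: at most
 * kstack_depth_to_print entries of STACK_DUMP_ENTRY_SIZE bytes each, and
 * never past the end of the stack page. Lines that begin a stack frame
 * found by walk_stackframe() (and the first line) are marked with "> ".
 */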
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 16
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

struct stack_fragment {
	size_t len;
	size_t off;
	u8 *sp;
	const char *loglvl;
};

static int show_stack_fragment_cb(struct stackframe *frame, void *data)
{
	struct stack_fragment *sf = data;

	while (sf->off < sf->len) {
		u8 line[STACK_DUMP_LINE_SIZE];
		size_t line_len = sf->len - sf->off > STACK_DUMP_LINE_SIZE ?
			STACK_DUMP_LINE_SIZE : sf->len - sf->off;
		bool arrow = sf->off == 0;

		if (frame && frame->sp == (unsigned long)(sf->sp + sf->off))
			arrow = true;

		__memcpy(line, sf->sp + sf->off, line_len);
		print_hex_dump(sf->loglvl, arrow ? "> " : "  ", DUMP_PREFIX_NONE,
			       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
			       line, line_len, false);
		sf->off += STACK_DUMP_LINE_SIZE;
		if (arrow)
			return 0;
	}
	return 1;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct stack_fragment sf;

	if (!sp)
		sp = stack_pointer(task);

	sf.len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
		     kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
	sf.off = 0;
	sf.sp = (u8 *)sp;
	sf.loglvl = loglvl;

	printk("%sStack:\n", loglvl);
	walk_stackframe(sp, show_stack_fragment_cb, &sf);
	while (sf.off < sf.len)
		show_stack_fragment_cb(NULL, &sf);
	show_trace(task, sp, loglvl);
}

DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;
	const char *pr = "";

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	console_verbose();
	spin_lock_irq(&die_lock);

	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(err);
}