/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"

void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

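/*
 * Print a call trace starting at 'stack', or at the given task's saved
 * kernel stack pointer (current r15 if no task is given), walking the
 * configured stacks in order: panic stack, async stack, kernel stack.
 */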
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

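/*
 * Extract a field from the PSW mask: mask out 'bits' and shift the result
 * down to bit 0 by dividing by the lowest set bit of 'bits'.
 */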
"User" : "Krnl"; 192 printk("%s PSW : %p %p", 193 mode, (void *) regs->psw.mask, 194 (void *) regs->psw.addr); 195 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 196 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 197 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 198 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 199 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 200 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 201 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 202 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 203 #ifdef CONFIG_64BIT 204 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 205 #endif 206 printk("\n%s GPRS: " FOURLONG, mode, 207 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 208 printk(" " FOURLONG, 209 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 210 printk(" " FOURLONG, 211 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 212 printk(" " FOURLONG, 213 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 214 215 show_code(regs); 216 } 217 218 void show_regs(struct pt_regs *regs) 219 { 220 print_modules(); 221 printk("CPU: %d %s %s %.*s\n", 222 task_thread_info(current)->cpu, print_tainted(), 223 init_utsname()->release, 224 (int)strcspn(init_utsname()->version, " "), 225 init_utsname()->version); 226 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 227 current->comm, current->pid, current, 228 (void *) current->thread.ksp); 229 show_registers(regs); 230 /* Show stack backtrace if pt_regs is from kernel mode */ 231 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 232 show_trace(NULL, (unsigned long *) regs->gprs[15]); 233 show_last_breaking_event(regs); 234 } 235 236 static DEFINE_SPINLOCK(die_lock); 237 238 void die(struct pt_regs *regs, const char *str) 239 { 240 static int die_counter; 241 242 oops_enter(); 243 lgr_info_log(); 244 debug_stop_all(); 245 console_verbose(); 246 spin_lock_irq(&die_lock); 247 bust_spinlocks(1); 248 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); 249 #ifdef CONFIG_PREEMPT 250 printk("PREEMPT "); 251 #endif 252 #ifdef CONFIG_SMP 253 printk("SMP "); 254 #endif 255 #ifdef CONFIG_DEBUG_PAGEALLOC 256 printk("DEBUG_PAGEALLOC"); 257 #endif 258 printk("\n"); 259 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 260 show_regs(regs); 261 bust_spinlocks(0); 262 add_taint(TAINT_DIE); 263 spin_unlock_irq(&die_lock); 264 if (in_interrupt()) 265 panic("Fatal exception in interrupt"); 266 if (panic_on_oops) 267 panic("Fatal exception: panic_on_oops"); 268 oops_exit(); 269 do_exit(SIGSEGV); 270 } 271 272 static inline void report_user_fault(struct pt_regs *regs, int signr) 273 { 274 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 275 return; 276 if (!unhandled_signal(current, signr)) 277 return; 278 if (!printk_ratelimit()) 279 return; 280 printk("User process fault: interruption code 0x%X ", regs->int_code); 281 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 282 printk("\n"); 283 show_regs(regs); 284 } 285 286 int is_valid_bugaddr(unsigned long addr) 287 { 288 return 1; 289 } 290 291 static inline void __user *get_psw_address(struct pt_regs *regs) 292 { 293 return (void __user *) 294 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); 295 } 296 297 static void __kprobes do_trap(struct pt_regs *regs, 298 int si_signo, int si_code, char *str) 299 { 300 siginfo_t info; 301 302 if (notify_die(DIE_TRAP, str, regs, 0, 303 
static inline void __user *get_psw_address(struct pt_regs *regs)
{
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}

static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}

void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

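/*
 * Handle illegal-operation program checks.  In user space this recognizes
 * the ptrace breakpoint opcode and, with CONFIG_MATHEMU, emulates floating
 * point instructions that are not available on the machine.  In kernel mode
 * the exception is offered to the kprobes breakpoint notifier before
 * falling back to SIGILL handling.
 */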
static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, send SIGILL.
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}


#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

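/*
 * Handle data exceptions.  On machines with IEEE floating point the current
 * FPC is stored so the Data Exception Code can be inspected; with
 * CONFIG_MATHEMU user-space floating point instructions are emulated
 * instead.  A non-zero DXC is reported as SIGFPE via do_fp_trap(),
 * anything else as SIGILL.
 */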
static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}