1 /* 2 * arch/s390/kernel/traps.c 3 * 4 * S390 version 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * 9 * Derived from "arch/i386/kernel/traps.c" 10 * Copyright (C) 1991, 1992 Linus Torvalds 11 */ 12 13 /* 14 * 'Traps.c' handles hardware traps and faults after we have saved some 15 * state in 'asm.s'. 16 */ 17 #include <linux/sched.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/seq_file.h> 28 #include <linux/delay.h> 29 #include <linux/module.h> 30 #include <linux/kdebug.h> 31 #include <linux/kallsyms.h> 32 #include <linux/reboot.h> 33 #include <linux/kprobes.h> 34 #include <linux/bug.h> 35 #include <linux/utsname.h> 36 #include <asm/system.h> 37 #include <asm/uaccess.h> 38 #include <asm/io.h> 39 #include <linux/atomic.h> 40 #include <asm/mathemu.h> 41 #include <asm/cpcmd.h> 42 #include <asm/lowcore.h> 43 #include <asm/debug.h> 44 #include "entry.h" 45 46 void (*pgm_check_table[128])(struct pt_regs *regs); 47 48 int show_unhandled_signals = 1; 49 50 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 51 52 #ifndef CONFIG_64BIT 53 #define LONG "%08lx " 54 #define FOURLONG "%08lx %08lx %08lx %08lx\n" 55 static int kstack_depth_to_print = 12; 56 #else /* CONFIG_64BIT */ 57 #define LONG "%016lx " 58 #define FOURLONG "%016lx %016lx %016lx %016lx\n" 59 static int kstack_depth_to_print = 20; 60 #endif /* CONFIG_64BIT */ 61 62 /* 63 * For show_trace we have tree different stack to consider: 64 * - the panic stack which is used if the kernel stack has overflown 65 * - the asynchronous interrupt stack (cpu related) 66 * - the synchronous kernel stack (process related) 67 * The stack trace can start at any of the three stack and can potentially 68 * touch all of them. The order is: panic stack, async stack, sync stack. 69 */ 70 static unsigned long 71 __show_trace(unsigned long sp, unsigned long low, unsigned long high) 72 { 73 struct stack_frame *sf; 74 struct pt_regs *regs; 75 76 while (1) { 77 sp = sp & PSW_ADDR_INSN; 78 if (sp < low || sp > high - sizeof(*sf)) 79 return sp; 80 sf = (struct stack_frame *) sp; 81 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 82 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); 83 /* Follow the backchain. */ 84 while (1) { 85 low = sp; 86 sp = sf->back_chain & PSW_ADDR_INSN; 87 if (!sp) 88 break; 89 if (sp <= low || sp > high - sizeof(*sf)) 90 return sp; 91 sf = (struct stack_frame *) sp; 92 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 93 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); 94 } 95 /* Zero backchain detected, check for interrupt frame. */ 96 sp = (unsigned long) (sf + 1); 97 if (sp <= low || sp > high - sizeof(*regs)) 98 return sp; 99 regs = (struct pt_regs *) sp; 100 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); 101 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); 102 low = sp; 103 sp = regs->gprs[15]; 104 } 105 } 106 107 static void show_trace(struct task_struct *task, unsigned long *stack) 108 { 109 register unsigned long __r15 asm ("15"); 110 unsigned long sp; 111 112 sp = (unsigned long) stack; 113 if (!sp) 114 sp = task ? task->thread.ksp : __r15; 115 printk("Call Trace:\n"); 116 #ifdef CONFIG_CHECK_STACK 117 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 118 S390_lowcore.panic_stack); 119 #endif 120 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 121 S390_lowcore.async_stack); 122 if (task) 123 __show_trace(sp, (unsigned long) task_stack_page(task), 124 (unsigned long) task_stack_page(task) + THREAD_SIZE); 125 else 126 __show_trace(sp, S390_lowcore.thread_info, 127 S390_lowcore.thread_info + THREAD_SIZE); 128 if (!task) 129 task = current; 130 debug_show_held_locks(task); 131 } 132 133 void show_stack(struct task_struct *task, unsigned long *sp) 134 { 135 register unsigned long * __r15 asm ("15"); 136 unsigned long *stack; 137 int i; 138 139 if (!sp) 140 stack = task ? (unsigned long *) task->thread.ksp : __r15; 141 else 142 stack = sp; 143 144 for (i = 0; i < kstack_depth_to_print; i++) { 145 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 146 break; 147 if (i && ((i * sizeof (long) % 32) == 0)) 148 printk("\n "); 149 printk(LONG, *stack++); 150 } 151 printk("\n"); 152 show_trace(task, sp); 153 } 154 155 static void show_last_breaking_event(struct pt_regs *regs) 156 { 157 #ifdef CONFIG_64BIT 158 printk("Last Breaking-Event-Address:\n"); 159 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); 160 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); 161 #endif 162 } 163 164 /* 165 * The architecture-independent dump_stack generator 166 */ 167 void dump_stack(void) 168 { 169 printk("CPU: %d %s %s %.*s\n", 170 task_thread_info(current)->cpu, print_tainted(), 171 init_utsname()->release, 172 (int)strcspn(init_utsname()->version, " "), 173 init_utsname()->version); 174 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 175 current->comm, current->pid, current, 176 (void *) current->thread.ksp); 177 show_stack(NULL, NULL); 178 } 179 EXPORT_SYMBOL(dump_stack); 180 181 static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 182 { 183 return (regs->psw.mask & bits) / ((~bits + 1) & bits); 184 } 185 186 void show_registers(struct pt_regs *regs) 187 { 188 char *mode; 189 190 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 191 printk("%s PSW : %p %p", 192 mode, (void *) regs->psw.mask, 193 (void *) regs->psw.addr); 194 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 195 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 196 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 197 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 198 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 199 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 200 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 201 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 202 #ifdef CONFIG_64BIT 203 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 204 #endif 205 printk("\n%s GPRS: " FOURLONG, mode, 206 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 207 printk(" " FOURLONG, 208 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 209 printk(" " FOURLONG, 210 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 211 printk(" " FOURLONG, 212 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 213 214 show_code(regs); 215 } 216 217 void show_regs(struct pt_regs *regs) 218 { 219 print_modules(); 220 printk("CPU: %d %s %s %.*s\n", 221 task_thread_info(current)->cpu, print_tainted(), 222 init_utsname()->release, 223 (int)strcspn(init_utsname()->version, " "), 224 init_utsname()->version); 225 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 226 current->comm, current->pid, current, 227 (void *) current->thread.ksp); 228 show_registers(regs); 229 /* Show stack backtrace if pt_regs is from kernel mode */ 230 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 231 show_trace(NULL, (unsigned long *) regs->gprs[15]); 232 show_last_breaking_event(regs); 233 } 234 235 static DEFINE_SPINLOCK(die_lock); 236 237 void die(struct pt_regs *regs, const char *str) 238 { 239 static int die_counter; 240 241 oops_enter(); 242 debug_stop_all(); 243 console_verbose(); 244 spin_lock_irq(&die_lock); 245 bust_spinlocks(1); 246 printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); 247 #ifdef CONFIG_PREEMPT 248 printk("PREEMPT "); 249 #endif 250 #ifdef CONFIG_SMP 251 printk("SMP "); 252 #endif 253 #ifdef CONFIG_DEBUG_PAGEALLOC 254 printk("DEBUG_PAGEALLOC"); 255 #endif 256 printk("\n"); 257 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 258 show_regs(regs); 259 bust_spinlocks(0); 260 add_taint(TAINT_DIE); 261 spin_unlock_irq(&die_lock); 262 if (in_interrupt()) 263 panic("Fatal exception in interrupt"); 264 if (panic_on_oops) 265 panic("Fatal exception: panic_on_oops"); 266 oops_exit(); 267 do_exit(SIGSEGV); 268 } 269 270 static inline void report_user_fault(struct pt_regs *regs, int signr) 271 { 272 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 273 return; 274 if (!unhandled_signal(current, signr)) 275 return; 276 if (!printk_ratelimit()) 277 return; 278 printk("User process fault: interruption code 0x%X ", regs->int_code); 279 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 280 printk("\n"); 281 show_regs(regs); 282 } 283 284 int is_valid_bugaddr(unsigned long addr) 285 { 286 return 1; 287 } 288 289 static inline void __user *get_psw_address(struct pt_regs *regs) 290 { 291 return (void __user *) 292 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); 293 } 294 295 static void __kprobes do_trap(struct pt_regs *regs, 296 int si_signo, int si_code, char *str) 297 { 298 siginfo_t info; 299 300 if (notify_die(DIE_TRAP, str, regs, 0, 301 regs->int_code, si_signo) == NOTIFY_STOP) 302 return; 303 304 if (regs->psw.mask & PSW_MASK_PSTATE) { 305 info.si_signo = si_signo; 306 info.si_errno = 0; 307 info.si_code = si_code; 308 info.si_addr = get_psw_address(regs); 309 force_sig_info(si_signo, &info, current); 310 report_user_fault(regs, si_signo); 311 } else { 312 const struct exception_table_entry *fixup; 313 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 314 if (fixup) 315 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 316 else { 317 enum bug_trap_type btt; 318 319 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 320 if (btt == BUG_TRAP_TYPE_WARN) 321 return; 322 die(regs, str); 323 } 324 } 325 } 326 327 void __kprobes do_per_trap(struct pt_regs *regs) 328 { 329 siginfo_t info; 330 331 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) 332 return; 333 if (!current->ptrace) 334 return; 335 info.si_signo = SIGTRAP; 336 info.si_errno = 0; 337 info.si_code = TRAP_HWBKPT; 338 info.si_addr = 339 (void __force __user *) current->thread.per_event.address; 340 force_sig_info(SIGTRAP, &info, current); 341 } 342 343 static void default_trap_handler(struct pt_regs *regs) 344 { 345 if (regs->psw.mask & PSW_MASK_PSTATE) { 346 report_user_fault(regs, SIGSEGV); 347 do_exit(SIGSEGV); 348 } else 349 die(regs, "Unknown program exception"); 350 } 351 352 #define DO_ERROR_INFO(name, signr, sicode, str) \ 353 static void name(struct pt_regs *regs) \ 354 { \ 355 do_trap(regs, signr, sicode, str); \ 356 } 357 358 DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, 359 "addressing exception") 360 DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, 361 "execute exception") 362 DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, 363 "fixpoint divide exception") 364 DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, 365 "fixpoint overflow exception") 366 DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, 367 "HFP overflow exception") 368 DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, 369 "HFP underflow exception") 370 DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, 371 "HFP significance exception") 372 DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, 373 "HFP divide exception") 374 DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, 375 "HFP square root exception") 376 DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, 377 "operand exception") 378 DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, 379 "privileged operation") 380 DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 381 "special operation exception") 382 DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, 383 "translation exception") 384 385 static inline void do_fp_trap(struct pt_regs *regs, int fpc) 386 { 387 int si_code = 0; 388 /* FPC[2] is Data Exception Code */ 389 if ((fpc & 0x00000300) == 0) { 390 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 391 if (fpc & 0x8000) /* invalid fp operation */ 392 si_code = FPE_FLTINV; 393 else if (fpc & 0x4000) /* div by 0 */ 394 si_code = FPE_FLTDIV; 395 else if (fpc & 0x2000) /* overflow */ 396 si_code = FPE_FLTOVF; 397 else if (fpc & 0x1000) /* underflow */ 398 si_code = FPE_FLTUND; 399 else if (fpc & 0x0800) /* inexact */ 400 si_code = FPE_FLTRES; 401 } 402 do_trap(regs, SIGFPE, si_code, "floating point exception"); 403 } 404 405 static void __kprobes illegal_op(struct pt_regs *regs) 406 { 407 siginfo_t info; 408 __u8 opcode[6]; 409 __u16 __user *location; 410 int signal = 0; 411 412 location = get_psw_address(regs); 413 414 if (regs->psw.mask & PSW_MASK_PSTATE) { 415 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 416 return; 417 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 418 if (current->ptrace) { 419 info.si_signo = SIGTRAP; 420 info.si_errno = 0; 421 info.si_code = TRAP_BRKPT; 422 info.si_addr = location; 423 force_sig_info(SIGTRAP, &info, current); 424 } else 425 signal = SIGILL; 426 #ifdef CONFIG_MATHEMU 427 } else if (opcode[0] == 0xb3) { 428 if (get_user(*((__u16 *) (opcode+2)), location+1)) 429 return; 430 signal = math_emu_b3(opcode, regs); 431 } else if (opcode[0] == 0xed) { 432 if (get_user(*((__u32 *) (opcode+2)), 433 (__u32 __user *)(location+1))) 434 return; 435 signal = math_emu_ed(opcode, regs); 436 } else if (*((__u16 *) opcode) == 0xb299) { 437 if (get_user(*((__u16 *) (opcode+2)), location+1)) 438 return; 439 signal = math_emu_srnm(opcode, regs); 440 } else if (*((__u16 *) opcode) == 0xb29c) { 441 if (get_user(*((__u16 *) (opcode+2)), location+1)) 442 return; 443 signal = math_emu_stfpc(opcode, regs); 444 } else if (*((__u16 *) opcode) == 0xb29d) { 445 if (get_user(*((__u16 *) (opcode+2)), location+1)) 446 return; 447 signal = math_emu_lfpc(opcode, regs); 448 #endif 449 } else 450 signal = SIGILL; 451 } else { 452 /* 453 * If we get an illegal op in kernel mode, send it through the 454 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 455 */ 456 if (notify_die(DIE_BPT, "bpt", regs, 0, 457 3, SIGTRAP) != NOTIFY_STOP) 458 signal = SIGILL; 459 } 460 461 #ifdef CONFIG_MATHEMU 462 if (signal == SIGFPE) 463 do_fp_trap(regs, current->thread.fp_regs.fpc); 464 else if (signal == SIGSEGV) 465 do_trap(regs, signal, SEGV_MAPERR, "user address fault"); 466 else 467 #endif 468 if (signal) 469 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 470 } 471 472 473 #ifdef CONFIG_MATHEMU 474 void specification_exception(struct pt_regs *regs) 475 { 476 __u8 opcode[6]; 477 __u16 __user *location = NULL; 478 int signal = 0; 479 480 location = (__u16 __user *) get_psw_address(regs); 481 482 if (regs->psw.mask & PSW_MASK_PSTATE) { 483 get_user(*((__u16 *) opcode), location); 484 switch (opcode[0]) { 485 case 0x28: /* LDR Rx,Ry */ 486 signal = math_emu_ldr(opcode); 487 break; 488 case 0x38: /* LER Rx,Ry */ 489 signal = math_emu_ler(opcode); 490 break; 491 case 0x60: /* STD R,D(X,B) */ 492 get_user(*((__u16 *) (opcode+2)), location+1); 493 signal = math_emu_std(opcode, regs); 494 break; 495 case 0x68: /* LD R,D(X,B) */ 496 get_user(*((__u16 *) (opcode+2)), location+1); 497 signal = math_emu_ld(opcode, regs); 498 break; 499 case 0x70: /* STE R,D(X,B) */ 500 get_user(*((__u16 *) (opcode+2)), location+1); 501 signal = math_emu_ste(opcode, regs); 502 break; 503 case 0x78: /* LE R,D(X,B) */ 504 get_user(*((__u16 *) (opcode+2)), location+1); 505 signal = math_emu_le(opcode, regs); 506 break; 507 default: 508 signal = SIGILL; 509 break; 510 } 511 } else 512 signal = SIGILL; 513 514 if (signal == SIGFPE) 515 do_fp_trap(regs, current->thread.fp_regs.fpc); 516 else if (signal) 517 do_trap(regs, signal, ILL_ILLOPN, "specification exception"); 518 } 519 #else 520 DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 521 "specification exception"); 522 #endif 523 524 static void data_exception(struct pt_regs *regs) 525 { 526 __u16 __user *location; 527 int signal = 0; 528 529 location = get_psw_address(regs); 530 531 if (MACHINE_HAS_IEEE) 532 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 533 534 #ifdef CONFIG_MATHEMU 535 else if (regs->psw.mask & PSW_MASK_PSTATE) { 536 __u8 opcode[6]; 537 get_user(*((__u16 *) opcode), location); 538 switch (opcode[0]) { 539 case 0x28: /* LDR Rx,Ry */ 540 signal = math_emu_ldr(opcode); 541 break; 542 case 0x38: /* LER Rx,Ry */ 543 signal = math_emu_ler(opcode); 544 break; 545 case 0x60: /* STD R,D(X,B) */ 546 get_user(*((__u16 *) (opcode+2)), location+1); 547 signal = math_emu_std(opcode, regs); 548 break; 549 case 0x68: /* LD R,D(X,B) */ 550 get_user(*((__u16 *) (opcode+2)), location+1); 551 signal = math_emu_ld(opcode, regs); 552 break; 553 case 0x70: /* STE R,D(X,B) */ 554 get_user(*((__u16 *) (opcode+2)), location+1); 555 signal = math_emu_ste(opcode, regs); 556 break; 557 case 0x78: /* LE R,D(X,B) */ 558 get_user(*((__u16 *) (opcode+2)), location+1); 559 signal = math_emu_le(opcode, regs); 560 break; 561 case 0xb3: 562 get_user(*((__u16 *) (opcode+2)), location+1); 563 signal = math_emu_b3(opcode, regs); 564 break; 565 case 0xed: 566 get_user(*((__u32 *) (opcode+2)), 567 (__u32 __user *)(location+1)); 568 signal = math_emu_ed(opcode, regs); 569 break; 570 case 0xb2: 571 if (opcode[1] == 0x99) { 572 get_user(*((__u16 *) (opcode+2)), location+1); 573 signal = math_emu_srnm(opcode, regs); 574 } else if (opcode[1] == 0x9c) { 575 get_user(*((__u16 *) (opcode+2)), location+1); 576 signal = math_emu_stfpc(opcode, regs); 577 } else if (opcode[1] == 0x9d) { 578 get_user(*((__u16 *) (opcode+2)), location+1); 579 signal = math_emu_lfpc(opcode, regs); 580 } else 581 signal = SIGILL; 582 break; 583 default: 584 signal = SIGILL; 585 break; 586 } 587 } 588 #endif 589 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 590 signal = SIGFPE; 591 else 592 signal = SIGILL; 593 if (signal == SIGFPE) 594 do_fp_trap(regs, current->thread.fp_regs.fpc); 595 else if (signal) 596 do_trap(regs, signal, ILL_ILLOPN, "data exception"); 597 } 598 599 static void space_switch_exception(struct pt_regs *regs) 600 { 601 /* Set user psw back to home space mode. */ 602 if (regs->psw.mask & PSW_MASK_PSTATE) 603 regs->psw.mask |= PSW_ASC_HOME; 604 /* Send SIGILL. */ 605 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); 606 } 607 608 void __kprobes kernel_stack_overflow(struct pt_regs * regs) 609 { 610 bust_spinlocks(1); 611 printk("Kernel stack overflow.\n"); 612 show_regs(regs); 613 bust_spinlocks(0); 614 panic("Corrupt kernel stack, can't continue."); 615 } 616 617 /* init is done in lowcore.S and head.S */ 618 619 void __init trap_init(void) 620 { 621 int i; 622 623 for (i = 0; i < 128; i++) 624 pgm_check_table[i] = &default_trap_handler; 625 pgm_check_table[1] = &illegal_op; 626 pgm_check_table[2] = &privileged_op; 627 pgm_check_table[3] = &execute_exception; 628 pgm_check_table[4] = &do_protection_exception; 629 pgm_check_table[5] = &addressing_exception; 630 pgm_check_table[6] = &specification_exception; 631 pgm_check_table[7] = &data_exception; 632 pgm_check_table[8] = &overflow_exception; 633 pgm_check_table[9] = ÷_exception; 634 pgm_check_table[0x0A] = &overflow_exception; 635 pgm_check_table[0x0B] = ÷_exception; 636 pgm_check_table[0x0C] = &hfp_overflow_exception; 637 pgm_check_table[0x0D] = &hfp_underflow_exception; 638 pgm_check_table[0x0E] = &hfp_significance_exception; 639 pgm_check_table[0x0F] = &hfp_divide_exception; 640 pgm_check_table[0x10] = &do_dat_exception; 641 pgm_check_table[0x11] = &do_dat_exception; 642 pgm_check_table[0x12] = &translation_exception; 643 pgm_check_table[0x13] = &special_op_exception; 644 #ifdef CONFIG_64BIT 645 pgm_check_table[0x38] = &do_asce_exception; 646 pgm_check_table[0x39] = &do_dat_exception; 647 pgm_check_table[0x3A] = &do_dat_exception; 648 pgm_check_table[0x3B] = &do_dat_exception; 649 #endif /* CONFIG_64BIT */ 650 pgm_check_table[0x15] = &operand_exception; 651 pgm_check_table[0x1C] = &space_switch_exception; 652 pgm_check_table[0x1D] = &hfp_sqrt_exception; 653 /* Enable machine checks early. */ 654 local_mcck_enable(); 655 } 656