1 /* 2 * arch/s390/kernel/traps.c 3 * 4 * S390 version 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * 9 * Derived from "arch/i386/kernel/traps.c" 10 * Copyright (C) 1991, 1992 Linus Torvalds 11 */ 12 13 /* 14 * 'Traps.c' handles hardware traps and faults after we have saved some 15 * state in 'asm.s'. 16 */ 17 #include <linux/sched.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/smp_lock.h> 26 #include <linux/init.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 #include <linux/module.h> 30 #include <linux/kallsyms.h> 31 #include <linux/reboot.h> 32 #include <linux/kprobes.h> 33 34 #include <asm/system.h> 35 #include <asm/uaccess.h> 36 #include <asm/io.h> 37 #include <asm/atomic.h> 38 #include <asm/mathemu.h> 39 #include <asm/cpcmd.h> 40 #include <asm/s390_ext.h> 41 #include <asm/lowcore.h> 42 #include <asm/debug.h> 43 #include <asm/kdebug.h> 44 45 /* Called from entry.S only */ 46 extern void handle_per_exception(struct pt_regs *regs); 47 48 typedef void pgm_check_handler_t(struct pt_regs *, long); 49 pgm_check_handler_t *pgm_check_table[128]; 50 51 #ifdef CONFIG_SYSCTL 52 #ifdef CONFIG_PROCESS_DEBUG 53 int sysctl_userprocess_debug = 1; 54 #else 55 int sysctl_userprocess_debug = 0; 56 #endif 57 #endif 58 59 extern pgm_check_handler_t do_protection_exception; 60 extern pgm_check_handler_t do_dat_exception; 61 extern pgm_check_handler_t do_monitor_call; 62 63 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 64 65 #ifndef CONFIG_64BIT 66 #define FOURLONG "%08lx %08lx %08lx %08lx\n" 67 static int kstack_depth_to_print = 12; 68 #else /* CONFIG_64BIT */ 69 #define FOURLONG "%016lx %016lx %016lx %016lx\n" 70 static int kstack_depth_to_print = 20; 71 #endif /* CONFIG_64BIT */ 72 73 ATOMIC_NOTIFIER_HEAD(s390die_chain); 74 75 int register_die_notifier(struct notifier_block *nb) 76 { 77 return atomic_notifier_chain_register(&s390die_chain, nb); 78 } 79 EXPORT_SYMBOL(register_die_notifier); 80 81 int unregister_die_notifier(struct notifier_block *nb) 82 { 83 return atomic_notifier_chain_unregister(&s390die_chain, nb); 84 } 85 EXPORT_SYMBOL(unregister_die_notifier); 86 87 /* 88 * For show_trace we have tree different stack to consider: 89 * - the panic stack which is used if the kernel stack has overflown 90 * - the asynchronous interrupt stack (cpu related) 91 * - the synchronous kernel stack (process related) 92 * The stack trace can start at any of the three stack and can potentially 93 * touch all of them. The order is: panic stack, async stack, sync stack. 94 */ 95 static unsigned long 96 __show_trace(unsigned long sp, unsigned long low, unsigned long high) 97 { 98 struct stack_frame *sf; 99 struct pt_regs *regs; 100 101 while (1) { 102 sp = sp & PSW_ADDR_INSN; 103 if (sp < low || sp > high - sizeof(*sf)) 104 return sp; 105 sf = (struct stack_frame *) sp; 106 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 107 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); 108 /* Follow the backchain. */ 109 while (1) { 110 low = sp; 111 sp = sf->back_chain & PSW_ADDR_INSN; 112 if (!sp) 113 break; 114 if (sp <= low || sp > high - sizeof(*sf)) 115 return sp; 116 sf = (struct stack_frame *) sp; 117 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 118 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); 119 } 120 /* Zero backchain detected, check for interrupt frame. */ 121 sp = (unsigned long) (sf + 1); 122 if (sp <= low || sp > high - sizeof(*regs)) 123 return sp; 124 regs = (struct pt_regs *) sp; 125 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); 126 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); 127 low = sp; 128 sp = regs->gprs[15]; 129 } 130 } 131 132 void show_trace(struct task_struct *task, unsigned long *stack) 133 { 134 register unsigned long __r15 asm ("15"); 135 unsigned long sp; 136 137 sp = (unsigned long) stack; 138 if (!sp) 139 sp = task ? task->thread.ksp : __r15; 140 printk("Call Trace:\n"); 141 #ifdef CONFIG_CHECK_STACK 142 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 143 S390_lowcore.panic_stack); 144 #endif 145 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 146 S390_lowcore.async_stack); 147 if (task) 148 __show_trace(sp, (unsigned long) task_stack_page(task), 149 (unsigned long) task_stack_page(task) + THREAD_SIZE); 150 else 151 __show_trace(sp, S390_lowcore.thread_info, 152 S390_lowcore.thread_info + THREAD_SIZE); 153 printk("\n"); 154 if (!task) 155 task = current; 156 debug_show_held_locks(task); 157 } 158 159 void show_stack(struct task_struct *task, unsigned long *sp) 160 { 161 register unsigned long * __r15 asm ("15"); 162 unsigned long *stack; 163 int i; 164 165 if (!sp) 166 stack = task ? (unsigned long *) task->thread.ksp : __r15; 167 else 168 stack = sp; 169 170 for (i = 0; i < kstack_depth_to_print; i++) { 171 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 172 break; 173 if (i && ((i * sizeof (long) % 32) == 0)) 174 printk("\n "); 175 printk("%p ", (void *)*stack++); 176 } 177 printk("\n"); 178 show_trace(task, sp); 179 } 180 181 /* 182 * The architecture-independent dump_stack generator 183 */ 184 void dump_stack(void) 185 { 186 show_stack(NULL, NULL); 187 } 188 189 EXPORT_SYMBOL(dump_stack); 190 191 void show_registers(struct pt_regs *regs) 192 { 193 mm_segment_t old_fs; 194 char *mode; 195 int i; 196 197 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 198 printk("%s PSW : %p %p", 199 mode, (void *) regs->psw.mask, 200 (void *) regs->psw.addr); 201 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 202 printk("%s GPRS: " FOURLONG, mode, 203 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 204 printk(" " FOURLONG, 205 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 206 printk(" " FOURLONG, 207 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 208 printk(" " FOURLONG, 209 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 210 211 #if 0 212 /* FIXME: this isn't needed any more but it changes the ksymoops 213 * input. To remove or not to remove ... */ 214 save_access_regs(regs->acrs); 215 printk("%s ACRS: %08x %08x %08x %08x\n", mode, 216 regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); 217 printk(" %08x %08x %08x %08x\n", 218 regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); 219 printk(" %08x %08x %08x %08x\n", 220 regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); 221 printk(" %08x %08x %08x %08x\n", 222 regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); 223 #endif 224 225 /* 226 * Print the first 20 byte of the instruction stream at the 227 * time of the fault. 228 */ 229 old_fs = get_fs(); 230 if (regs->psw.mask & PSW_MASK_PSTATE) 231 set_fs(USER_DS); 232 else 233 set_fs(KERNEL_DS); 234 printk("%s Code: ", mode); 235 for (i = 0; i < 20; i++) { 236 unsigned char c; 237 if (__get_user(c, (char __user *)(regs->psw.addr + i))) { 238 printk(" Bad PSW."); 239 break; 240 } 241 printk("%02x ", c); 242 } 243 set_fs(old_fs); 244 245 printk("\n"); 246 } 247 248 /* This is called from fs/proc/array.c */ 249 char *task_show_regs(struct task_struct *task, char *buffer) 250 { 251 struct pt_regs *regs; 252 253 regs = task_pt_regs(task); 254 buffer += sprintf(buffer, "task: %p, ksp: %p\n", 255 task, (void *)task->thread.ksp); 256 buffer += sprintf(buffer, "User PSW : %p %p\n", 257 (void *) regs->psw.mask, (void *)regs->psw.addr); 258 259 buffer += sprintf(buffer, "User GPRS: " FOURLONG, 260 regs->gprs[0], regs->gprs[1], 261 regs->gprs[2], regs->gprs[3]); 262 buffer += sprintf(buffer, " " FOURLONG, 263 regs->gprs[4], regs->gprs[5], 264 regs->gprs[6], regs->gprs[7]); 265 buffer += sprintf(buffer, " " FOURLONG, 266 regs->gprs[8], regs->gprs[9], 267 regs->gprs[10], regs->gprs[11]); 268 buffer += sprintf(buffer, " " FOURLONG, 269 regs->gprs[12], regs->gprs[13], 270 regs->gprs[14], regs->gprs[15]); 271 buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n", 272 task->thread.acrs[0], task->thread.acrs[1], 273 task->thread.acrs[2], task->thread.acrs[3]); 274 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 275 task->thread.acrs[4], task->thread.acrs[5], 276 task->thread.acrs[6], task->thread.acrs[7]); 277 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 278 task->thread.acrs[8], task->thread.acrs[9], 279 task->thread.acrs[10], task->thread.acrs[11]); 280 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 281 task->thread.acrs[12], task->thread.acrs[13], 282 task->thread.acrs[14], task->thread.acrs[15]); 283 return buffer; 284 } 285 286 static DEFINE_SPINLOCK(die_lock); 287 288 void die(const char * str, struct pt_regs * regs, long err) 289 { 290 static int die_counter; 291 292 debug_stop_all(); 293 console_verbose(); 294 spin_lock_irq(&die_lock); 295 bust_spinlocks(1); 296 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 297 show_regs(regs); 298 bust_spinlocks(0); 299 spin_unlock_irq(&die_lock); 300 if (in_interrupt()) 301 panic("Fatal exception in interrupt"); 302 if (panic_on_oops) 303 panic("Fatal exception: panic_on_oops"); 304 do_exit(SIGSEGV); 305 } 306 307 static void inline 308 report_user_fault(long interruption_code, struct pt_regs *regs) 309 { 310 #if defined(CONFIG_SYSCTL) 311 if (!sysctl_userprocess_debug) 312 return; 313 #endif 314 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 315 printk("User process fault: interruption code 0x%lX\n", 316 interruption_code); 317 show_regs(regs); 318 #endif 319 } 320 321 static void __kprobes inline do_trap(long interruption_code, int signr, 322 char *str, struct pt_regs *regs, 323 siginfo_t *info) 324 { 325 /* 326 * We got all needed information from the lowcore and can 327 * now safely switch on interrupts. 328 */ 329 if (regs->psw.mask & PSW_MASK_PSTATE) 330 local_irq_enable(); 331 332 if (notify_die(DIE_TRAP, str, regs, interruption_code, 333 interruption_code, signr) == NOTIFY_STOP) 334 return; 335 336 if (regs->psw.mask & PSW_MASK_PSTATE) { 337 struct task_struct *tsk = current; 338 339 tsk->thread.trap_no = interruption_code & 0xffff; 340 force_sig_info(signr, info, tsk); 341 report_user_fault(interruption_code, regs); 342 } else { 343 const struct exception_table_entry *fixup; 344 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 345 if (fixup) 346 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 347 else 348 die(str, regs, interruption_code); 349 } 350 } 351 352 static inline void __user *get_check_address(struct pt_regs *regs) 353 { 354 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 355 } 356 357 void __kprobes do_single_step(struct pt_regs *regs) 358 { 359 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 360 SIGTRAP) == NOTIFY_STOP){ 361 return; 362 } 363 if ((current->ptrace & PT_PTRACED) != 0) 364 force_sig(SIGTRAP, current); 365 } 366 367 static void default_trap_handler(struct pt_regs * regs, long interruption_code) 368 { 369 if (regs->psw.mask & PSW_MASK_PSTATE) { 370 local_irq_enable(); 371 do_exit(SIGSEGV); 372 report_user_fault(interruption_code, regs); 373 } else 374 die("Unknown program exception", regs, interruption_code); 375 } 376 377 #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 378 static void name(struct pt_regs * regs, long interruption_code) \ 379 { \ 380 siginfo_t info; \ 381 info.si_signo = signr; \ 382 info.si_errno = 0; \ 383 info.si_code = sicode; \ 384 info.si_addr = siaddr; \ 385 do_trap(interruption_code, signr, str, regs, &info); \ 386 } 387 388 DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, 389 ILL_ILLADR, get_check_address(regs)) 390 DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, 391 ILL_ILLOPN, get_check_address(regs)) 392 DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, 393 FPE_INTDIV, get_check_address(regs)) 394 DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, 395 FPE_INTOVF, get_check_address(regs)) 396 DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, 397 FPE_FLTOVF, get_check_address(regs)) 398 DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, 399 FPE_FLTUND, get_check_address(regs)) 400 DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, 401 FPE_FLTRES, get_check_address(regs)) 402 DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, 403 FPE_FLTDIV, get_check_address(regs)) 404 DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, 405 FPE_FLTINV, get_check_address(regs)) 406 DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, 407 ILL_ILLOPN, get_check_address(regs)) 408 DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, 409 ILL_PRVOPC, get_check_address(regs)) 410 DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, 411 ILL_ILLOPN, get_check_address(regs)) 412 DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, 413 ILL_ILLOPN, get_check_address(regs)) 414 415 static inline void 416 do_fp_trap(struct pt_regs *regs, void __user *location, 417 int fpc, long interruption_code) 418 { 419 siginfo_t si; 420 421 si.si_signo = SIGFPE; 422 si.si_errno = 0; 423 si.si_addr = location; 424 si.si_code = 0; 425 /* FPC[2] is Data Exception Code */ 426 if ((fpc & 0x00000300) == 0) { 427 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 428 if (fpc & 0x8000) /* invalid fp operation */ 429 si.si_code = FPE_FLTINV; 430 else if (fpc & 0x4000) /* div by 0 */ 431 si.si_code = FPE_FLTDIV; 432 else if (fpc & 0x2000) /* overflow */ 433 si.si_code = FPE_FLTOVF; 434 else if (fpc & 0x1000) /* underflow */ 435 si.si_code = FPE_FLTUND; 436 else if (fpc & 0x0800) /* inexact */ 437 si.si_code = FPE_FLTRES; 438 } 439 current->thread.ieee_instruction_pointer = (addr_t) location; 440 do_trap(interruption_code, SIGFPE, 441 "floating point exception", regs, &si); 442 } 443 444 static void illegal_op(struct pt_regs * regs, long interruption_code) 445 { 446 siginfo_t info; 447 __u8 opcode[6]; 448 __u16 __user *location; 449 int signal = 0; 450 451 location = get_check_address(regs); 452 453 /* 454 * We got all needed information from the lowcore and can 455 * now safely switch on interrupts. 456 */ 457 if (regs->psw.mask & PSW_MASK_PSTATE) 458 local_irq_enable(); 459 460 if (regs->psw.mask & PSW_MASK_PSTATE) { 461 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 462 return; 463 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 464 if (current->ptrace & PT_PTRACED) 465 force_sig(SIGTRAP, current); 466 else 467 signal = SIGILL; 468 #ifdef CONFIG_MATHEMU 469 } else if (opcode[0] == 0xb3) { 470 if (get_user(*((__u16 *) (opcode+2)), location+1)) 471 return; 472 signal = math_emu_b3(opcode, regs); 473 } else if (opcode[0] == 0xed) { 474 if (get_user(*((__u32 *) (opcode+2)), 475 (__u32 __user *)(location+1))) 476 return; 477 signal = math_emu_ed(opcode, regs); 478 } else if (*((__u16 *) opcode) == 0xb299) { 479 if (get_user(*((__u16 *) (opcode+2)), location+1)) 480 return; 481 signal = math_emu_srnm(opcode, regs); 482 } else if (*((__u16 *) opcode) == 0xb29c) { 483 if (get_user(*((__u16 *) (opcode+2)), location+1)) 484 return; 485 signal = math_emu_stfpc(opcode, regs); 486 } else if (*((__u16 *) opcode) == 0xb29d) { 487 if (get_user(*((__u16 *) (opcode+2)), location+1)) 488 return; 489 signal = math_emu_lfpc(opcode, regs); 490 #endif 491 } else 492 signal = SIGILL; 493 } else { 494 /* 495 * If we get an illegal op in kernel mode, send it through the 496 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 497 */ 498 if (notify_die(DIE_BPT, "bpt", regs, interruption_code, 499 3, SIGTRAP) != NOTIFY_STOP) 500 signal = SIGILL; 501 } 502 503 #ifdef CONFIG_MATHEMU 504 if (signal == SIGFPE) 505 do_fp_trap(regs, location, 506 current->thread.fp_regs.fpc, interruption_code); 507 else if (signal == SIGSEGV) { 508 info.si_signo = signal; 509 info.si_errno = 0; 510 info.si_code = SEGV_MAPERR; 511 info.si_addr = (void __user *) location; 512 do_trap(interruption_code, signal, 513 "user address fault", regs, &info); 514 } else 515 #endif 516 if (signal) { 517 info.si_signo = signal; 518 info.si_errno = 0; 519 info.si_code = ILL_ILLOPC; 520 info.si_addr = (void __user *) location; 521 do_trap(interruption_code, signal, 522 "illegal operation", regs, &info); 523 } 524 } 525 526 527 #ifdef CONFIG_MATHEMU 528 asmlinkage void 529 specification_exception(struct pt_regs * regs, long interruption_code) 530 { 531 __u8 opcode[6]; 532 __u16 __user *location = NULL; 533 int signal = 0; 534 535 location = (__u16 __user *) get_check_address(regs); 536 537 /* 538 * We got all needed information from the lowcore and can 539 * now safely switch on interrupts. 540 */ 541 if (regs->psw.mask & PSW_MASK_PSTATE) 542 local_irq_enable(); 543 544 if (regs->psw.mask & PSW_MASK_PSTATE) { 545 get_user(*((__u16 *) opcode), location); 546 switch (opcode[0]) { 547 case 0x28: /* LDR Rx,Ry */ 548 signal = math_emu_ldr(opcode); 549 break; 550 case 0x38: /* LER Rx,Ry */ 551 signal = math_emu_ler(opcode); 552 break; 553 case 0x60: /* STD R,D(X,B) */ 554 get_user(*((__u16 *) (opcode+2)), location+1); 555 signal = math_emu_std(opcode, regs); 556 break; 557 case 0x68: /* LD R,D(X,B) */ 558 get_user(*((__u16 *) (opcode+2)), location+1); 559 signal = math_emu_ld(opcode, regs); 560 break; 561 case 0x70: /* STE R,D(X,B) */ 562 get_user(*((__u16 *) (opcode+2)), location+1); 563 signal = math_emu_ste(opcode, regs); 564 break; 565 case 0x78: /* LE R,D(X,B) */ 566 get_user(*((__u16 *) (opcode+2)), location+1); 567 signal = math_emu_le(opcode, regs); 568 break; 569 default: 570 signal = SIGILL; 571 break; 572 } 573 } else 574 signal = SIGILL; 575 576 if (signal == SIGFPE) 577 do_fp_trap(regs, location, 578 current->thread.fp_regs.fpc, interruption_code); 579 else if (signal) { 580 siginfo_t info; 581 info.si_signo = signal; 582 info.si_errno = 0; 583 info.si_code = ILL_ILLOPN; 584 info.si_addr = location; 585 do_trap(interruption_code, signal, 586 "specification exception", regs, &info); 587 } 588 } 589 #else 590 DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, 591 ILL_ILLOPN, get_check_address(regs)); 592 #endif 593 594 static void data_exception(struct pt_regs * regs, long interruption_code) 595 { 596 __u16 __user *location; 597 int signal = 0; 598 599 location = get_check_address(regs); 600 601 /* 602 * We got all needed information from the lowcore and can 603 * now safely switch on interrupts. 604 */ 605 if (regs->psw.mask & PSW_MASK_PSTATE) 606 local_irq_enable(); 607 608 if (MACHINE_HAS_IEEE) 609 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 610 611 #ifdef CONFIG_MATHEMU 612 else if (regs->psw.mask & PSW_MASK_PSTATE) { 613 __u8 opcode[6]; 614 get_user(*((__u16 *) opcode), location); 615 switch (opcode[0]) { 616 case 0x28: /* LDR Rx,Ry */ 617 signal = math_emu_ldr(opcode); 618 break; 619 case 0x38: /* LER Rx,Ry */ 620 signal = math_emu_ler(opcode); 621 break; 622 case 0x60: /* STD R,D(X,B) */ 623 get_user(*((__u16 *) (opcode+2)), location+1); 624 signal = math_emu_std(opcode, regs); 625 break; 626 case 0x68: /* LD R,D(X,B) */ 627 get_user(*((__u16 *) (opcode+2)), location+1); 628 signal = math_emu_ld(opcode, regs); 629 break; 630 case 0x70: /* STE R,D(X,B) */ 631 get_user(*((__u16 *) (opcode+2)), location+1); 632 signal = math_emu_ste(opcode, regs); 633 break; 634 case 0x78: /* LE R,D(X,B) */ 635 get_user(*((__u16 *) (opcode+2)), location+1); 636 signal = math_emu_le(opcode, regs); 637 break; 638 case 0xb3: 639 get_user(*((__u16 *) (opcode+2)), location+1); 640 signal = math_emu_b3(opcode, regs); 641 break; 642 case 0xed: 643 get_user(*((__u32 *) (opcode+2)), 644 (__u32 __user *)(location+1)); 645 signal = math_emu_ed(opcode, regs); 646 break; 647 case 0xb2: 648 if (opcode[1] == 0x99) { 649 get_user(*((__u16 *) (opcode+2)), location+1); 650 signal = math_emu_srnm(opcode, regs); 651 } else if (opcode[1] == 0x9c) { 652 get_user(*((__u16 *) (opcode+2)), location+1); 653 signal = math_emu_stfpc(opcode, regs); 654 } else if (opcode[1] == 0x9d) { 655 get_user(*((__u16 *) (opcode+2)), location+1); 656 signal = math_emu_lfpc(opcode, regs); 657 } else 658 signal = SIGILL; 659 break; 660 default: 661 signal = SIGILL; 662 break; 663 } 664 } 665 #endif 666 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 667 signal = SIGFPE; 668 else 669 signal = SIGILL; 670 if (signal == SIGFPE) 671 do_fp_trap(regs, location, 672 current->thread.fp_regs.fpc, interruption_code); 673 else if (signal) { 674 siginfo_t info; 675 info.si_signo = signal; 676 info.si_errno = 0; 677 info.si_code = ILL_ILLOPN; 678 info.si_addr = location; 679 do_trap(interruption_code, signal, 680 "data exception", regs, &info); 681 } 682 } 683 684 static void space_switch_exception(struct pt_regs * regs, long int_code) 685 { 686 siginfo_t info; 687 688 /* Set user psw back to home space mode. */ 689 if (regs->psw.mask & PSW_MASK_PSTATE) 690 regs->psw.mask |= PSW_ASC_HOME; 691 /* Send SIGILL. */ 692 info.si_signo = SIGILL; 693 info.si_errno = 0; 694 info.si_code = ILL_PRVOPC; 695 info.si_addr = get_check_address(regs); 696 do_trap(int_code, SIGILL, "space switch event", regs, &info); 697 } 698 699 asmlinkage void kernel_stack_overflow(struct pt_regs * regs) 700 { 701 bust_spinlocks(1); 702 printk("Kernel stack overflow.\n"); 703 show_regs(regs); 704 bust_spinlocks(0); 705 panic("Corrupt kernel stack, can't continue."); 706 } 707 708 /* init is done in lowcore.S and head.S */ 709 710 void __init trap_init(void) 711 { 712 int i; 713 714 for (i = 0; i < 128; i++) 715 pgm_check_table[i] = &default_trap_handler; 716 pgm_check_table[1] = &illegal_op; 717 pgm_check_table[2] = &privileged_op; 718 pgm_check_table[3] = &execute_exception; 719 pgm_check_table[4] = &do_protection_exception; 720 pgm_check_table[5] = &addressing_exception; 721 pgm_check_table[6] = &specification_exception; 722 pgm_check_table[7] = &data_exception; 723 pgm_check_table[8] = &overflow_exception; 724 pgm_check_table[9] = ÷_exception; 725 pgm_check_table[0x0A] = &overflow_exception; 726 pgm_check_table[0x0B] = ÷_exception; 727 pgm_check_table[0x0C] = &hfp_overflow_exception; 728 pgm_check_table[0x0D] = &hfp_underflow_exception; 729 pgm_check_table[0x0E] = &hfp_significance_exception; 730 pgm_check_table[0x0F] = &hfp_divide_exception; 731 pgm_check_table[0x10] = &do_dat_exception; 732 pgm_check_table[0x11] = &do_dat_exception; 733 pgm_check_table[0x12] = &translation_exception; 734 pgm_check_table[0x13] = &special_op_exception; 735 #ifdef CONFIG_64BIT 736 pgm_check_table[0x38] = &do_dat_exception; 737 pgm_check_table[0x39] = &do_dat_exception; 738 pgm_check_table[0x3A] = &do_dat_exception; 739 pgm_check_table[0x3B] = &do_dat_exception; 740 #endif /* CONFIG_64BIT */ 741 pgm_check_table[0x15] = &operand_exception; 742 pgm_check_table[0x1C] = &space_switch_exception; 743 pgm_check_table[0x1D] = &hfp_sqrt_exception; 744 pgm_check_table[0x40] = &do_monitor_call; 745 pfault_irq_init(); 746 } 747