/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
extern pgm_check_handler_t do_monitor_call;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
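                /*
                 * An interrupted context is saved as a struct pt_regs
                 * directly behind the frame with the zero backchain, so
                 * print its PSW address and resume the walk on the stack
                 * pointer saved in gprs[15].
                 */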
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long *__r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n       ");
                printk("%p ", (void *)*stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        show_stack(0, 0);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        mm_segment_t old_fs;
        char *mode;
        int i;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

#if 0
        /* FIXME: this isn't needed any more but it changes the ksymoops
         * input. To remove or not to remove ... */
        save_access_regs(regs->acrs);
        printk("%s ACRS: %08x %08x %08x %08x\n", mode,
               regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
#endif

        /*
         * Print the first 20 bytes of the instruction stream at the
         * time of the fault.
         */
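        /*
         * The instruction bytes have to be fetched from the address
         * space the fault happened in: use USER_DS for user-mode PSWs
         * and KERNEL_DS otherwise, and restore the previous mode when
         * done.
         */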
        old_fs = get_fs();
        if (regs->psw.mask & PSW_MASK_PSTATE)
                set_fs(USER_DS);
        else
                set_fs(KERNEL_DS);
        printk("%s Code: ", mode);
        for (i = 0; i < 20; i++) {
                unsigned char c;
                if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
                        printk(" Bad PSW.");
                        break;
                }
                printk("%02x ", c);
        }
        set_fs(old_fs);

        printk("\n");
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
        struct pt_regs *regs;

        regs = task_pt_regs(task);
        buffer += sprintf(buffer, "task: %p, ksp: %p\n",
                          task, (void *)task->thread.ksp);
        buffer += sprintf(buffer, "User PSW : %p %p\n",
                          (void *) regs->psw.mask, (void *)regs->psw.addr);

        buffer += sprintf(buffer, "User GPRS: " FOURLONG,
                          regs->gprs[0], regs->gprs[1],
                          regs->gprs[2], regs->gprs[3]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[4], regs->gprs[5],
                          regs->gprs[6], regs->gprs[7]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[8], regs->gprs[9],
                          regs->gprs[10], regs->gprs[11]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[12], regs->gprs[13],
                          regs->gprs[14], regs->gprs[15]);
        buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
                          task->thread.acrs[0], task->thread.acrs[1],
                          task->thread.acrs[2], task->thread.acrs[3]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[4], task->thread.acrs[5],
                          task->thread.acrs[6], task->thread.acrs[7]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[8], task->thread.acrs[9],
                          task->thread.acrs[10], task->thread.acrs[11]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[12], task->thread.acrs[13],
                          task->thread.acrs[14], task->thread.acrs[15]);
        return buffer;
}

DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;

        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
        show_regs(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        do_exit(SIGSEGV);
}

static inline void
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
        if (!sysctl_userprocess_debug)
                return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
        printk("User process fault: interruption code 0x%lX\n",
               interruption_code);
        show_regs(regs);
#endif
}

static inline void do_trap(long interruption_code, int signr, char *str,
                           struct pt_regs *regs, siginfo_t *info)
{
        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
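        /*
         * User-mode faults get the prepared siginfo delivered to the
         * current task; kernel-mode faults are first checked against
         * the exception tables and only die() if no fixup exists.
         */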
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                struct task_struct *tsk = current;

                tsk->thread.trap_no = interruption_code & 0xffff;
                force_sig_info(signr, info, tsk);
                report_user_fault(interruption_code, regs);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else
                        die(str, regs, interruption_code);
        }
}

static inline void *get_check_address(struct pt_regs *regs)
{
        return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

void do_single_step(struct pt_regs *regs)
{
        if ((current->ptrace & PT_PTRACED) != 0)
                force_sig(SIGTRAP, current);
}

asmlinkage void
default_trap_handler(struct pt_regs *regs, long interruption_code)
{
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                local_irq_enable();
                report_user_fault(interruption_code, regs);
                do_exit(SIGSEGV);
        } else
                die("Unknown program exception", regs, interruption_code);
}

#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
asmlinkage void name(struct pt_regs *regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void *)siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
              ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
              FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
              FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
              FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
              FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
              FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
              FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
              FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
              ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
              ILL_ILLOPN, get_check_address(regs))

static inline void
do_fp_trap(struct pt_regs *regs, void *location,
           int fpc, long interruption_code)
{
        siginfo_t si;

        si.si_signo = SIGFPE;
        si.si_errno = 0;
        si.si_addr = location;
        si.si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si.si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si.si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si.si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si.si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si.si_code = FPE_FLTRES;
        }
        current->thread.ieee_instruction_pointer = (addr_t) location;
        do_trap(interruption_code, SIGFPE,
                "floating point exception", regs, &si);
}

asmlinkage void illegal_op(struct pt_regs *regs, long interruption_code)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), (__u16 __user *) location);
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace & PT_PTRACED)
                                force_sig(SIGTRAP, current);
                        else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else
                signal = SIGILL;

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal == SIGSEGV) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void *) location;
                do_trap(interruption_code, signal,
                        "user address fault", regs, &info);
        } else
#endif
        if (signal) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPC;
                info.si_addr = (void __user *) location;
                do_trap(interruption_code, signal,
                        "illegal operation", regs, &info);
        }
}


#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs *regs, long interruption_code)
{
        __u8 opcode[6];
        __u16 *location = NULL;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
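        /*
         * Only user-mode faults are emulated here: fetch the failing
         * opcode and dispatch to the floating point emulation helpers,
         * anything unrecognized raises SIGILL.
         */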
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "specification exception", regs, &info);
        }
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
              ILL_ILLOPN, get_check_address(regs));
#endif

asmlinkage void data_exception(struct pt_regs *regs, long interruption_code)
{
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (MACHINE_HAS_IEEE)
                __asm__ volatile ("stfpc %0\n\t"
                                  : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[6];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "data exception", regs, &info);
        }
}

asmlinkage void space_switch_exception(struct pt_regs *regs, long int_code)
{
        siginfo_t info;

        /* Set user psw back to home space mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_PRVOPC;
        info.si_addr = get_check_address(regs);
        do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs *regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x38] = &do_dat_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        pgm_check_table[0x40] = &do_monitor_call;

        if (MACHINE_IS_VM) {
#ifdef CONFIG_PFAULT
                /*
                 * Try to get pfault pseudo page faults going.
                 */
                if (register_early_external_interrupt(0x2603, pfault_interrupt,
                                                      &ext_int_pfault) != 0)
                        panic("Couldn't request external interrupt 0x2603");

                if (pfault_init() == 0)
                        return;

                /* Tough luck, no pfault. */
                unregister_early_external_interrupt(0x2603, pfault_interrupt,
                                                    &ext_int_pfault);
#endif
        }
}