/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"

pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
"User" : "Krnl"; 202 printk("%s PSW : %p %p", 203 mode, (void *) regs->psw.mask, 204 (void *) regs->psw.addr); 205 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 206 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 207 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 208 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 209 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 210 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 211 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 212 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 213 #ifdef CONFIG_64BIT 214 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); 215 #endif 216 printk("\n%s GPRS: " FOURLONG, mode, 217 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 218 printk(" " FOURLONG, 219 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 220 printk(" " FOURLONG, 221 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 222 printk(" " FOURLONG, 223 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 224 225 show_code(regs); 226 } 227 228 void show_regs(struct pt_regs *regs) 229 { 230 print_modules(); 231 printk("CPU: %d %s %s %.*s\n", 232 task_thread_info(current)->cpu, print_tainted(), 233 init_utsname()->release, 234 (int)strcspn(init_utsname()->version, " "), 235 init_utsname()->version); 236 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 237 current->comm, current->pid, current, 238 (void *) current->thread.ksp); 239 show_registers(regs); 240 /* Show stack backtrace if pt_regs is from kernel mode */ 241 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 242 show_trace(NULL, (unsigned long *) regs->gprs[15]); 243 show_last_breaking_event(regs); 244 } 245 246 /* This is called from fs/proc/array.c */ 247 void task_show_regs(struct seq_file *m, struct task_struct *task) 248 { 249 struct pt_regs *regs; 250 251 regs = task_pt_regs(task); 252 seq_printf(m, "task: %p, ksp: %p\n", 253 task, (void *)task->thread.ksp); 254 seq_printf(m, "User PSW : %p %p\n", 255 (void *) regs->psw.mask, (void *)regs->psw.addr); 256 257 seq_printf(m, "User GPRS: " FOURLONG, 258 regs->gprs[0], regs->gprs[1], 259 regs->gprs[2], regs->gprs[3]); 260 seq_printf(m, " " FOURLONG, 261 regs->gprs[4], regs->gprs[5], 262 regs->gprs[6], regs->gprs[7]); 263 seq_printf(m, " " FOURLONG, 264 regs->gprs[8], regs->gprs[9], 265 regs->gprs[10], regs->gprs[11]); 266 seq_printf(m, " " FOURLONG, 267 regs->gprs[12], regs->gprs[13], 268 regs->gprs[14], regs->gprs[15]); 269 seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", 270 task->thread.acrs[0], task->thread.acrs[1], 271 task->thread.acrs[2], task->thread.acrs[3]); 272 seq_printf(m, " %08x %08x %08x %08x\n", 273 task->thread.acrs[4], task->thread.acrs[5], 274 task->thread.acrs[6], task->thread.acrs[7]); 275 seq_printf(m, " %08x %08x %08x %08x\n", 276 task->thread.acrs[8], task->thread.acrs[9], 277 task->thread.acrs[10], task->thread.acrs[11]); 278 seq_printf(m, " %08x %08x %08x %08x\n", 279 task->thread.acrs[12], task->thread.acrs[13], 280 task->thread.acrs[14], task->thread.acrs[15]); 281 } 282 283 static DEFINE_SPINLOCK(die_lock); 284 285 void die(const char * str, struct pt_regs * regs, long err) 286 { 287 static int die_counter; 288 289 oops_enter(); 290 debug_stop_all(); 291 console_verbose(); 292 spin_lock_irq(&die_lock); 293 bust_spinlocks(1); 294 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 295 #ifdef CONFIG_PREEMPT 296 printk("PREEMPT "); 297 #endif 298 #ifdef CONFIG_SMP 299 printk("SMP 
"); 300 #endif 301 #ifdef CONFIG_DEBUG_PAGEALLOC 302 printk("DEBUG_PAGEALLOC"); 303 #endif 304 printk("\n"); 305 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 306 show_regs(regs); 307 bust_spinlocks(0); 308 add_taint(TAINT_DIE); 309 spin_unlock_irq(&die_lock); 310 if (in_interrupt()) 311 panic("Fatal exception in interrupt"); 312 if (panic_on_oops) 313 panic("Fatal exception: panic_on_oops"); 314 oops_exit(); 315 do_exit(SIGSEGV); 316 } 317 318 static void inline 319 report_user_fault(long interruption_code, struct pt_regs *regs) 320 { 321 #if defined(CONFIG_SYSCTL) 322 if (!sysctl_userprocess_debug) 323 return; 324 #endif 325 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 326 printk("User process fault: interruption code 0x%lX\n", 327 interruption_code); 328 show_regs(regs); 329 #endif 330 } 331 332 int is_valid_bugaddr(unsigned long addr) 333 { 334 return 1; 335 } 336 337 static void __kprobes inline do_trap(long interruption_code, int signr, 338 char *str, struct pt_regs *regs, 339 siginfo_t *info) 340 { 341 /* 342 * We got all needed information from the lowcore and can 343 * now safely switch on interrupts. 344 */ 345 if (regs->psw.mask & PSW_MASK_PSTATE) 346 local_irq_enable(); 347 348 if (notify_die(DIE_TRAP, str, regs, interruption_code, 349 interruption_code, signr) == NOTIFY_STOP) 350 return; 351 352 if (regs->psw.mask & PSW_MASK_PSTATE) { 353 struct task_struct *tsk = current; 354 355 tsk->thread.trap_no = interruption_code & 0xffff; 356 force_sig_info(signr, info, tsk); 357 report_user_fault(interruption_code, regs); 358 } else { 359 const struct exception_table_entry *fixup; 360 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 361 if (fixup) 362 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 363 else { 364 enum bug_trap_type btt; 365 366 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 367 if (btt == BUG_TRAP_TYPE_WARN) 368 return; 369 die(str, regs, interruption_code); 370 } 371 } 372 } 373 374 static inline void __user *get_check_address(struct pt_regs *regs) 375 { 376 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 377 } 378 379 void __kprobes do_single_step(struct pt_regs *regs) 380 { 381 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 382 SIGTRAP) == NOTIFY_STOP){ 383 return; 384 } 385 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 386 force_sig(SIGTRAP, current); 387 } 388 389 static void default_trap_handler(struct pt_regs * regs, long interruption_code) 390 { 391 if (regs->psw.mask & PSW_MASK_PSTATE) { 392 local_irq_enable(); 393 do_exit(SIGSEGV); 394 report_user_fault(interruption_code, regs); 395 } else 396 die("Unknown program exception", regs, interruption_code); 397 } 398 399 #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 400 static void name(struct pt_regs * regs, long interruption_code) \ 401 { \ 402 siginfo_t info; \ 403 info.si_signo = signr; \ 404 info.si_errno = 0; \ 405 info.si_code = sicode; \ 406 info.si_addr = siaddr; \ 407 do_trap(interruption_code, signr, str, regs, &info); \ 408 } 409 410 DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, 411 ILL_ILLADR, get_check_address(regs)) 412 DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, 413 ILL_ILLOPN, get_check_address(regs)) 414 DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, 415 FPE_INTDIV, get_check_address(regs)) 416 DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, 417 FPE_INTOVF, get_check_address(regs)) 418 
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, 419 FPE_FLTOVF, get_check_address(regs)) 420 DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, 421 FPE_FLTUND, get_check_address(regs)) 422 DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, 423 FPE_FLTRES, get_check_address(regs)) 424 DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, 425 FPE_FLTDIV, get_check_address(regs)) 426 DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, 427 FPE_FLTINV, get_check_address(regs)) 428 DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, 429 ILL_ILLOPN, get_check_address(regs)) 430 DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, 431 ILL_PRVOPC, get_check_address(regs)) 432 DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, 433 ILL_ILLOPN, get_check_address(regs)) 434 DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, 435 ILL_ILLOPN, get_check_address(regs)) 436 437 static inline void 438 do_fp_trap(struct pt_regs *regs, void __user *location, 439 int fpc, long interruption_code) 440 { 441 siginfo_t si; 442 443 si.si_signo = SIGFPE; 444 si.si_errno = 0; 445 si.si_addr = location; 446 si.si_code = 0; 447 /* FPC[2] is Data Exception Code */ 448 if ((fpc & 0x00000300) == 0) { 449 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 450 if (fpc & 0x8000) /* invalid fp operation */ 451 si.si_code = FPE_FLTINV; 452 else if (fpc & 0x4000) /* div by 0 */ 453 si.si_code = FPE_FLTDIV; 454 else if (fpc & 0x2000) /* overflow */ 455 si.si_code = FPE_FLTOVF; 456 else if (fpc & 0x1000) /* underflow */ 457 si.si_code = FPE_FLTUND; 458 else if (fpc & 0x0800) /* inexact */ 459 si.si_code = FPE_FLTRES; 460 } 461 current->thread.ieee_instruction_pointer = (addr_t) location; 462 do_trap(interruption_code, SIGFPE, 463 "floating point exception", regs, &si); 464 } 465 466 static void illegal_op(struct pt_regs * regs, long interruption_code) 467 { 468 siginfo_t info; 469 __u8 opcode[6]; 470 __u16 __user *location; 471 int signal = 0; 472 473 location = get_check_address(regs); 474 475 /* 476 * We got all needed information from the lowcore and can 477 * now safely switch on interrupts. 
478 */ 479 if (regs->psw.mask & PSW_MASK_PSTATE) 480 local_irq_enable(); 481 482 if (regs->psw.mask & PSW_MASK_PSTATE) { 483 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 484 return; 485 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 486 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 487 force_sig(SIGTRAP, current); 488 else 489 signal = SIGILL; 490 #ifdef CONFIG_MATHEMU 491 } else if (opcode[0] == 0xb3) { 492 if (get_user(*((__u16 *) (opcode+2)), location+1)) 493 return; 494 signal = math_emu_b3(opcode, regs); 495 } else if (opcode[0] == 0xed) { 496 if (get_user(*((__u32 *) (opcode+2)), 497 (__u32 __user *)(location+1))) 498 return; 499 signal = math_emu_ed(opcode, regs); 500 } else if (*((__u16 *) opcode) == 0xb299) { 501 if (get_user(*((__u16 *) (opcode+2)), location+1)) 502 return; 503 signal = math_emu_srnm(opcode, regs); 504 } else if (*((__u16 *) opcode) == 0xb29c) { 505 if (get_user(*((__u16 *) (opcode+2)), location+1)) 506 return; 507 signal = math_emu_stfpc(opcode, regs); 508 } else if (*((__u16 *) opcode) == 0xb29d) { 509 if (get_user(*((__u16 *) (opcode+2)), location+1)) 510 return; 511 signal = math_emu_lfpc(opcode, regs); 512 #endif 513 } else 514 signal = SIGILL; 515 } else { 516 /* 517 * If we get an illegal op in kernel mode, send it through the 518 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 519 */ 520 if (notify_die(DIE_BPT, "bpt", regs, interruption_code, 521 3, SIGTRAP) != NOTIFY_STOP) 522 signal = SIGILL; 523 } 524 525 #ifdef CONFIG_MATHEMU 526 if (signal == SIGFPE) 527 do_fp_trap(regs, location, 528 current->thread.fp_regs.fpc, interruption_code); 529 else if (signal == SIGSEGV) { 530 info.si_signo = signal; 531 info.si_errno = 0; 532 info.si_code = SEGV_MAPERR; 533 info.si_addr = (void __user *) location; 534 do_trap(interruption_code, signal, 535 "user address fault", regs, &info); 536 } else 537 #endif 538 if (signal) { 539 info.si_signo = signal; 540 info.si_errno = 0; 541 info.si_code = ILL_ILLOPC; 542 info.si_addr = (void __user *) location; 543 do_trap(interruption_code, signal, 544 "illegal operation", regs, &info); 545 } 546 } 547 548 549 #ifdef CONFIG_MATHEMU 550 asmlinkage void 551 specification_exception(struct pt_regs * regs, long interruption_code) 552 { 553 __u8 opcode[6]; 554 __u16 __user *location = NULL; 555 int signal = 0; 556 557 location = (__u16 __user *) get_check_address(regs); 558 559 /* 560 * We got all needed information from the lowcore and can 561 * now safely switch on interrupts. 
562 */ 563 if (regs->psw.mask & PSW_MASK_PSTATE) 564 local_irq_enable(); 565 566 if (regs->psw.mask & PSW_MASK_PSTATE) { 567 get_user(*((__u16 *) opcode), location); 568 switch (opcode[0]) { 569 case 0x28: /* LDR Rx,Ry */ 570 signal = math_emu_ldr(opcode); 571 break; 572 case 0x38: /* LER Rx,Ry */ 573 signal = math_emu_ler(opcode); 574 break; 575 case 0x60: /* STD R,D(X,B) */ 576 get_user(*((__u16 *) (opcode+2)), location+1); 577 signal = math_emu_std(opcode, regs); 578 break; 579 case 0x68: /* LD R,D(X,B) */ 580 get_user(*((__u16 *) (opcode+2)), location+1); 581 signal = math_emu_ld(opcode, regs); 582 break; 583 case 0x70: /* STE R,D(X,B) */ 584 get_user(*((__u16 *) (opcode+2)), location+1); 585 signal = math_emu_ste(opcode, regs); 586 break; 587 case 0x78: /* LE R,D(X,B) */ 588 get_user(*((__u16 *) (opcode+2)), location+1); 589 signal = math_emu_le(opcode, regs); 590 break; 591 default: 592 signal = SIGILL; 593 break; 594 } 595 } else 596 signal = SIGILL; 597 598 if (signal == SIGFPE) 599 do_fp_trap(regs, location, 600 current->thread.fp_regs.fpc, interruption_code); 601 else if (signal) { 602 siginfo_t info; 603 info.si_signo = signal; 604 info.si_errno = 0; 605 info.si_code = ILL_ILLOPN; 606 info.si_addr = location; 607 do_trap(interruption_code, signal, 608 "specification exception", regs, &info); 609 } 610 } 611 #else 612 DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, 613 ILL_ILLOPN, get_check_address(regs)); 614 #endif 615 616 static void data_exception(struct pt_regs * regs, long interruption_code) 617 { 618 __u16 __user *location; 619 int signal = 0; 620 621 location = get_check_address(regs); 622 623 /* 624 * We got all needed information from the lowcore and can 625 * now safely switch on interrupts. 626 */ 627 if (regs->psw.mask & PSW_MASK_PSTATE) 628 local_irq_enable(); 629 630 if (MACHINE_HAS_IEEE) 631 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 632 633 #ifdef CONFIG_MATHEMU 634 else if (regs->psw.mask & PSW_MASK_PSTATE) { 635 __u8 opcode[6]; 636 get_user(*((__u16 *) opcode), location); 637 switch (opcode[0]) { 638 case 0x28: /* LDR Rx,Ry */ 639 signal = math_emu_ldr(opcode); 640 break; 641 case 0x38: /* LER Rx,Ry */ 642 signal = math_emu_ler(opcode); 643 break; 644 case 0x60: /* STD R,D(X,B) */ 645 get_user(*((__u16 *) (opcode+2)), location+1); 646 signal = math_emu_std(opcode, regs); 647 break; 648 case 0x68: /* LD R,D(X,B) */ 649 get_user(*((__u16 *) (opcode+2)), location+1); 650 signal = math_emu_ld(opcode, regs); 651 break; 652 case 0x70: /* STE R,D(X,B) */ 653 get_user(*((__u16 *) (opcode+2)), location+1); 654 signal = math_emu_ste(opcode, regs); 655 break; 656 case 0x78: /* LE R,D(X,B) */ 657 get_user(*((__u16 *) (opcode+2)), location+1); 658 signal = math_emu_le(opcode, regs); 659 break; 660 case 0xb3: 661 get_user(*((__u16 *) (opcode+2)), location+1); 662 signal = math_emu_b3(opcode, regs); 663 break; 664 case 0xed: 665 get_user(*((__u32 *) (opcode+2)), 666 (__u32 __user *)(location+1)); 667 signal = math_emu_ed(opcode, regs); 668 break; 669 case 0xb2: 670 if (opcode[1] == 0x99) { 671 get_user(*((__u16 *) (opcode+2)), location+1); 672 signal = math_emu_srnm(opcode, regs); 673 } else if (opcode[1] == 0x9c) { 674 get_user(*((__u16 *) (opcode+2)), location+1); 675 signal = math_emu_stfpc(opcode, regs); 676 } else if (opcode[1] == 0x9d) { 677 get_user(*((__u16 *) (opcode+2)), location+1); 678 signal = math_emu_lfpc(opcode, regs); 679 } else 680 signal = SIGILL; 681 break; 682 default: 683 signal = SIGILL; 684 break; 685 } 686 
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pfault_irq_init();
}