/*
 * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>

#include <asm/kdebug.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>

#ifdef CONFIG_PPC64	/* XXX */
#define _IO_BASE	pci_io_base
#endif

#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);

/*
 * Trap & Exception support
 */

static DEFINE_SPINLOCK(die_lock);

int die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	if (debugger(regs))
		return 1;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_PMAC_BACKLIGHT
	mutex_lock(&pmac_backlight_mutex);
	if (machine_is(powermac) && pmac_backlight) {
		struct backlight_properties *props;

		down(&pmac_backlight->sem);
		props = pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		props->update_status(pmac_backlight);
		up(&pmac_backlight->sem);
	}
	mutex_unlock(&pmac_backlight_mutex);
#endif
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);

	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(err);

	return 0;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (current->pid == 1) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is a "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	unsigned long reason = get_mc_reason(regs);

	/* See if any machine dependent calls */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);

	if (recover)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");
#elif defined (CONFIG_E200)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");
#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
#endif /* CONFIG_4xx */

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);	/* Turn off 'trace' bits */

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;
	unsigned long fpscr;

	flush_fp_to_thread(current);

	fpscr = current->thread.fpscr.val;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		code = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		code = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		code = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		code = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		code = FPE_FLTRES;

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0xfc0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0xfc0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0xfc0007fe
#define INST_STRING_GEN_MASK	0xfc00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

#define INST_POPCNTB		0x7c0000f4
#define INST_POPCNTB_MASK	0xfc0007fe

static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn. */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) {
		return emulate_popcntb_inst(regs, instword);
	}

	return -EINVAL;
}

/*
 * Look through the list of trap instructions that are used for BUG(),
 * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
 * that the exception was caused by a trap instruction of some kind.
 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
 * otherwise.
 */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

#ifndef CONFIG_MODULES
#define module_find_bug(x)	NULL
#endif

struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)
			return bug;
	return module_find_bug(bugaddr);
}

static int check_bug_trap(struct pt_regs *regs)
{
	struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return 0;	/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return 0;
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return 0;
	if (bug->line & BUG_WARNING_TRAP) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		printk(KERN_ERR "Badness in %s at %s:%ld\n",
		       bug->function, bug->file,
		       bug->line & ~BUG_WARNING_TRAP);
		dump_stack();
		return 1;
	}
	printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
	       bug->function, bug->file, bug->line);

	return 0;
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;
		if (debugger_bpt(regs))
			return;
		if (check_bug_trap(regs)) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void alignment_exception(struct pt_regs *regs)
{
	int fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
		else
			/* Search exception table */
			bad_page_fault(regs, regs->dar, SIGSEGV);
		return;
	}
	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
#if !defined(CONFIG_ALTIVEC)
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
	       "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		emulate_single_step(regs);
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}