/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
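
/*
 * Concurrency sketch for the pair above: the first CPU to oops takes
 * die_lock and becomes die_owner, while any other oopsing CPU spins in
 * oops_begin().  If the owner faults again while printing (a nested
 * oops), its trylock fails but cpu == die_owner, so it falls through
 * with die_nest_count > 1; oops_end() only releases the lock once the
 * nest count drops back to zero.
 */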

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
	if (debug_pagealloc_enabled())
		printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	if (debugger(regs))
		return;

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
			      struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent code wants to handle this first */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}

#ifdef CONFIG_PPC64
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.hmi_exceptions);

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 0;
}

#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
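
/*
 * For illustration only (a sketch, not an exact listing): a 32-bit
 * inb() ends up as something like
 *
 *	lbzx	r3,0,r4		# load from the I/O address
 *	twi	0,r3,0		# trap-word-immediate on the loaded value
 *	isync			# force the load to complete
 *	nop
 *
 * A machine check taken on the load leaves regs->nip inside that
 * window, so check_io_access() walks back from the nop/isync to the
 * twi (or the sync that follows a store) and then to the preceding
 * load/store instruction to report which port faulted.
 */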

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else {
		u32 mcsr = mfspr(SPRN_MCSR);

		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC_8xx)
int machine_check_8xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	pr_err("Machine check in kernel mode.\n");
	pr_err("Caused by (from SRR1=%lx): ", reason);
	if (reason & 0x40000000)
		pr_err("Fetch error at address %lx\n", regs->nip);
	else
		pr_err("Data access error at address %lx\n", regs->dar);

#ifdef CONFIG_PCI
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return 1;
#else
	return 0;
#endif
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	/* See if any machine dependent code wants to handle this. In theory,
	 * we would want to call the CPU first, and call the ppc_md. one if
	 * the CPU one returns a positive number. However there is existing
	 * code that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}
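
/*
 * Example of the enable/status pairing __parse_fpscr() relies on: a
 * divide-by-zero is reported as FPE_FLTDIV only when the enable bit
 * (FPSCR_ZE) and the matching sticky status bit (FPSCR_ZX) are both
 * set when the trap is taken; with FPSCR_ZE clear the operation just
 * produces a signed infinity and no exception is delivered.
 */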

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
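
/*
 * The three masked steps above are the usual SWAR population count,
 * applied to each byte independently, which is exactly what popcntb
 * produces (a per-byte bit count left in each byte of rA).  Worked
 * example on the single byte 0xb5 (0b10110101, five bits set):
 *
 *	0xb5 - ((0xb5 >> 1) & 0x55)          = 0x65  (2-bit pair counts)
 *	(0x65 & 0x33) + ((0x65 >> 2) & 0x33) = 0x32  (nibble counts 3+2)
 *	(0x32 + (0x32 >> 4)) & 0x0f          = 0x05  (byte count = 5)
 */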

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
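
/*
 * isel semantics in one line: "isel rT,rA,rB,BC" copies rA (or 0 when
 * rA is r0) into rT if CR bit BC is set, and rB otherwise -- e.g.
 * "isel r3,r4,r5,0" selects r4 when CR0[LT] is set, else r5.
 */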

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;

		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
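
/*
 * Return convention used by the emulation helpers above: zero means
 * "instruction handled", -EFAULT means the associated user-memory
 * access faulted (the caller raises SIGSEGV), and any other non-zero
 * value means "not emulated here" and falls through to SIGILL in
 * program_check_exception() below.
 */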
1230 */ 1231 if (user_mode(regs)) { 1232 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1233 goto bail; 1234 } else { 1235 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1236 "at %lx (msr 0x%x)\n", regs->nip, reason); 1237 die("Unrecoverable exception", regs, SIGABRT); 1238 } 1239 } 1240 #endif 1241 1242 /* 1243 * If we took the program check in the kernel skip down to sending a 1244 * SIGILL. The subsequent cases all relate to emulating instructions 1245 * which we should only do for userspace. We also do not want to enable 1246 * interrupts for kernel faults because that might lead to further 1247 * faults, and loose the context of the original exception. 1248 */ 1249 if (!user_mode(regs)) 1250 goto sigill; 1251 1252 /* We restore the interrupt state now */ 1253 if (!arch_irq_disabled_regs(regs)) 1254 local_irq_enable(); 1255 1256 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1257 * but there seems to be a hardware bug on the 405GP (RevD) 1258 * that means ESR is sometimes set incorrectly - either to 1259 * ESR_DST (!?) or 0. In the process of chasing this with the 1260 * hardware people - not sure if it can happen on any illegal 1261 * instruction or only on FP instructions, whether there is a 1262 * pattern to occurrences etc. -dgibson 31/Mar/2003 1263 */ 1264 if (!emulate_math(regs)) 1265 goto bail; 1266 1267 /* Try to emulate it if we should. */ 1268 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1269 switch (emulate_instruction(regs)) { 1270 case 0: 1271 regs->nip += 4; 1272 emulate_single_step(regs); 1273 goto bail; 1274 case -EFAULT: 1275 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1276 goto bail; 1277 } 1278 } 1279 1280 sigill: 1281 if (reason & REASON_PRIVILEGED) 1282 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1283 else 1284 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1285 1286 bail: 1287 exception_exit(prev_state); 1288 } 1289 NOKPROBE_SYMBOL(program_check_exception); 1290 1291 /* 1292 * This occurs when running in hypervisor mode on POWER6 or later 1293 * and an illegal instruction is encountered. 

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void slb_miss_bad_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
	else
		bad_page_fault(regs, regs->dar, SIGSEGV);

	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif
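
/*
 * For reference, the two DSCR encodings discussed above: SPR 0x03 is
 * the problem-state number, which traps here via the FSCR[DSCR]
 * facility check, while SPR 0x11 is the privileged number, which
 * arrives as a privileged-instruction program check and is handled by
 * emulate_instruction() instead.
 */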
1509 * 1510 * If both of those things are true, then userspace can spam the 1511 * console by triggering the printk() below just by continually 1512 * doing tbegin (or any TM instruction). So in that case just 1513 * send the process a SIGILL immediately. 1514 */ 1515 if (!cpu_has_feature(CPU_FTR_TM)) 1516 goto out; 1517 1518 tm_unavailable(regs); 1519 return; 1520 } 1521 1522 if ((hv || status >= 2) && 1523 (status < ARRAY_SIZE(facility_strings)) && 1524 facility_strings[status]) 1525 facility = facility_strings[status]; 1526 1527 /* We restore the interrupt state now */ 1528 if (!arch_irq_disabled_regs(regs)) 1529 local_irq_enable(); 1530 1531 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", 1532 hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr); 1533 1534 out: 1535 if (user_mode(regs)) { 1536 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1537 return; 1538 } 1539 1540 die("Unexpected facility unavailable exception", regs, SIGABRT); 1541 } 1542 #endif 1543 1544 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1545 1546 void fp_unavailable_tm(struct pt_regs *regs) 1547 { 1548 /* Note: This does not handle any kind of FP laziness. */ 1549 1550 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1551 regs->nip, regs->msr); 1552 1553 /* We can only have got here if the task started using FP after 1554 * beginning the transaction. So, the transactional regs are just a 1555 * copy of the checkpointed ones. But, we still need to recheckpoint 1556 * as we're enabling FP for the process; it will return, abort the 1557 * transaction, and probably retry but now with FP enabled. So the 1558 * checkpointed FP registers need to be loaded. 1559 */ 1560 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1561 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1562 1563 /* Enable FP for the task: */ 1564 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1565 1566 /* This loads and recheckpoints the FP registers from 1567 * thread.fpr[]. They will remain in registers after the 1568 * checkpoint so we don't need to reload them after. 1569 * If VMX is in use, the VRs now hold checkpointed values, 1570 * so we don't want to load the VRs from the thread_struct. 1571 */ 1572 tm_recheckpoint(¤t->thread, MSR_FP); 1573 1574 /* If VMX is in use, get the transactional values back */ 1575 if (regs->msr & MSR_VEC) { 1576 msr_check_and_set(MSR_VEC); 1577 load_vr_state(¤t->thread.vr_state); 1578 /* At this point all the VSX state is loaded, so enable it */ 1579 regs->msr |= MSR_VSX; 1580 } 1581 } 1582 1583 void altivec_unavailable_tm(struct pt_regs *regs) 1584 { 1585 /* See the comments in fp_unavailable_tm(). This function operates 1586 * the same way. 1587 */ 1588 1589 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1590 "MSR=%lx\n", 1591 regs->nip, regs->msr); 1592 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1593 regs->msr |= MSR_VEC; 1594 tm_recheckpoint(¤t->thread, MSR_VEC); 1595 current->thread.used_vr = 1; 1596 1597 if (regs->msr & MSR_FP) { 1598 msr_check_and_set(MSR_FP); 1599 load_fp_state(¤t->thread.fp_state); 1600 regs->msr |= MSR_VSX; 1601 } 1602 } 1603 1604 void vsx_unavailable_tm(struct pt_regs *regs) 1605 { 1606 unsigned long orig_msr = regs->msr; 1607 1608 /* See the comments in fp_unavailable_tm(). This works similarly, 1609 * though we're loading both FP and VEC registers in here. 1610 * 1611 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1612 * regs. Either way, set MSR_VSX. 
1613 */ 1614 1615 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1616 "MSR=%lx\n", 1617 regs->nip, regs->msr); 1618 1619 current->thread.used_vsr = 1; 1620 1621 /* If FP and VMX are already loaded, we have all the state we need */ 1622 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { 1623 regs->msr |= MSR_VSX; 1624 return; 1625 } 1626 1627 /* This reclaims FP and/or VR regs if they're already enabled */ 1628 tm_reclaim_current(TM_CAUSE_FAC_UNAV); 1629 1630 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1631 MSR_VSX; 1632 1633 /* This loads & recheckpoints FP and VRs; but we have 1634 * to be sure not to overwrite previously-valid state. 1635 */ 1636 tm_recheckpoint(¤t->thread, regs->msr & ~orig_msr); 1637 1638 msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC)); 1639 1640 if (orig_msr & MSR_FP) 1641 load_fp_state(¤t->thread.fp_state); 1642 if (orig_msr & MSR_VEC) 1643 load_vr_state(¤t->thread.vr_state); 1644 } 1645 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1646 1647 void performance_monitor_exception(struct pt_regs *regs) 1648 { 1649 __this_cpu_inc(irq_stat.pmu_irqs); 1650 1651 perf_irq(regs); 1652 } 1653 1654 #ifdef CONFIG_8xx 1655 void SoftwareEmulation(struct pt_regs *regs) 1656 { 1657 CHECK_FULL_REGS(regs); 1658 1659 if (!user_mode(regs)) { 1660 debugger(regs); 1661 die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", 1662 regs, SIGFPE); 1663 } 1664 1665 if (!emulate_math(regs)) 1666 return; 1667 1668 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1669 } 1670 #endif /* CONFIG_8xx */ 1671 1672 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1673 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1674 { 1675 int changed = 0; 1676 /* 1677 * Determine the cause of the debug event, clear the 1678 * event flags and send a trap to the handler. Torez 1679 */ 1680 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1681 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1682 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1683 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; 1684 #endif 1685 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1686 5); 1687 changed |= 0x01; 1688 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1689 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1690 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1691 6); 1692 changed |= 0x01; 1693 } else if (debug_status & DBSR_IAC1) { 1694 current->thread.debug.dbcr0 &= ~DBCR0_IAC1; 1695 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1696 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1697 1); 1698 changed |= 0x01; 1699 } else if (debug_status & DBSR_IAC2) { 1700 current->thread.debug.dbcr0 &= ~DBCR0_IAC2; 1701 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1702 2); 1703 changed |= 0x01; 1704 } else if (debug_status & DBSR_IAC3) { 1705 current->thread.debug.dbcr0 &= ~DBCR0_IAC3; 1706 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1707 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1708 3); 1709 changed |= 0x01; 1710 } else if (debug_status & DBSR_IAC4) { 1711 current->thread.debug.dbcr0 &= ~DBCR0_IAC4; 1712 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1713 4); 1714 changed |= 0x01; 1715 } 1716 /* 1717 * At the point this routine was called, the MSR(DE) was turned off. 1718 * Check all other debug flags and see if that bit needs to be turned 1719 * back on or not. 
1720 */ 1721 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1722 current->thread.debug.dbcr1)) 1723 regs->msr |= MSR_DE; 1724 else 1725 /* Make sure the IDM flag is off */ 1726 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1727 1728 if (changed & 0x01) 1729 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 1730 } 1731 1732 void DebugException(struct pt_regs *regs, unsigned long debug_status) 1733 { 1734 current->thread.debug.dbsr = debug_status; 1735 1736 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1737 * on server, it stops on the target of the branch. In order to simulate 1738 * the server behaviour, we thus restart right away with a single step 1739 * instead of stopping here when hitting a BT 1740 */ 1741 if (debug_status & DBSR_BT) { 1742 regs->msr &= ~MSR_DE; 1743 1744 /* Disable BT */ 1745 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1746 /* Clear the BT event */ 1747 mtspr(SPRN_DBSR, DBSR_BT); 1748 1749 /* Do the single step trick only when coming from userspace */ 1750 if (user_mode(regs)) { 1751 current->thread.debug.dbcr0 &= ~DBCR0_BT; 1752 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1753 regs->msr |= MSR_DE; 1754 return; 1755 } 1756 1757 if (kprobe_post_handler(regs)) 1758 return; 1759 1760 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1761 5, SIGTRAP) == NOTIFY_STOP) { 1762 return; 1763 } 1764 if (debugger_sstep(regs)) 1765 return; 1766 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1767 regs->msr &= ~MSR_DE; 1768 1769 /* Disable instruction completion */ 1770 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1771 /* Clear the instruction completion event */ 1772 mtspr(SPRN_DBSR, DBSR_IC); 1773 1774 if (kprobe_post_handler(regs)) 1775 return; 1776 1777 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1778 5, SIGTRAP) == NOTIFY_STOP) { 1779 return; 1780 } 1781 1782 if (debugger_sstep(regs)) 1783 return; 1784 1785 if (user_mode(regs)) { 1786 current->thread.debug.dbcr0 &= ~DBCR0_IC; 1787 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, 1788 current->thread.debug.dbcr1)) 1789 regs->msr |= MSR_DE; 1790 else 1791 /* Make sure the IDM bit is off */ 1792 current->thread.debug.dbcr0 &= ~DBCR0_IDM; 1793 } 1794 1795 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1796 } else 1797 handle_debug(regs, debug_status); 1798 } 1799 NOKPROBE_SYMBOL(DebugException); 1800 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1801 1802 #if !defined(CONFIG_TAU_INT) 1803 void TAUException(struct pt_regs *regs) 1804 { 1805 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1806 regs->nip, regs->msr, regs->trap, print_tainted()); 1807 } 1808 #endif /* CONFIG_INT_TAU */ 1809 1810 #ifdef CONFIG_ALTIVEC 1811 void altivec_assist_exception(struct pt_regs *regs) 1812 { 1813 int err; 1814 1815 if (!user_mode(regs)) { 1816 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1817 " at %lx\n", regs->nip); 1818 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1819 } 1820 1821 flush_altivec_to_thread(current); 1822 1823 PPC_WARN_EMULATED(altivec, regs); 1824 err = emulate_altivec(regs); 1825 if (err == 0) { 1826 regs->nip += 4; /* skip emulated instruction */ 1827 emulate_single_step(regs); 1828 return; 1829 } 1830 1831 if (err == -EFAULT) { 1832 /* got an error reading the instruction */ 1833 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1834 } else { 1835 /* didn't recognize the instruction */ 1836 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1837 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK | ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) &&
		   (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif
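
/*
 * Note on the nip adjustment in SPEFloatingPointRoundException(): the
 * round exception is delivered after the offending instruction has
 * completed, so the handler first steps regs->nip back by one
 * instruction before asking speround_handler() to fix up the result,
 * and only advances it again once the fixup succeeds.
 */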

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
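
/*
 * The per-instruction counters above are exposed through debugfs,
 * e.g. (illustrative paths, assuming debugfs is mounted at the usual
 * location):
 *
 *	/sys/kernel/debug/powerpc/emulated_instructions/do_warn
 *	/sys/kernel/debug/powerpc/emulated_instructions/popcntb
 *
 * Writing a non-zero value to do_warn enables the ratelimited
 * per-process warnings printed by ppc_warn_emulated_print().
 */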