1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 #include <linux/context_tracking.h> 39 40 #include <asm/emulated_ops.h> 41 #include <asm/pgtable.h> 42 #include <asm/uaccess.h> 43 #include <asm/io.h> 44 #include <asm/machdep.h> 45 #include <asm/rtas.h> 46 #include <asm/pmc.h> 47 #ifdef CONFIG_PPC32 48 #include <asm/reg.h> 49 #endif 50 #ifdef CONFIG_PMAC_BACKLIGHT 51 #include <asm/backlight.h> 52 #endif 53 #ifdef CONFIG_PPC64 54 #include <asm/firmware.h> 55 #include <asm/processor.h> 56 #include <asm/tm.h> 57 #endif 58 #include <asm/kexec.h> 59 #include <asm/ppc-opcode.h> 60 #include <asm/rio.h> 61 #include <asm/fadump.h> 62 #include <asm/switch_to.h> 63 #include <asm/tm.h> 64 #include <asm/debug.h> 65 66 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 67 int (*__debugger)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 71 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 72 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly; 73 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 74 75 EXPORT_SYMBOL(__debugger); 76 EXPORT_SYMBOL(__debugger_ipi); 77 EXPORT_SYMBOL(__debugger_bpt); 78 EXPORT_SYMBOL(__debugger_sstep); 79 EXPORT_SYMBOL(__debugger_iabr_match); 80 EXPORT_SYMBOL(__debugger_break_match); 81 EXPORT_SYMBOL(__debugger_fault_handler); 82 #endif 83 84 /* Transactional Memory trap debug */ 85 #ifdef TM_DEBUG_SW 86 #define TM_DEBUG(x...) printk(KERN_INFO x) 87 #else 88 #define TM_DEBUG(x...) do { } while(0) 89 #endif 90 91 /* 92 * Trap & Exception support 93 */ 94 95 #ifdef CONFIG_PMAC_BACKLIGHT 96 static void pmac_backlight_unblank(void) 97 { 98 mutex_lock(&pmac_backlight_mutex); 99 if (pmac_backlight) { 100 struct backlight_properties *props; 101 102 props = &pmac_backlight->props; 103 props->brightness = props->max_brightness; 104 props->power = FB_BLANK_UNBLANK; 105 backlight_update_status(pmac_backlight); 106 } 107 mutex_unlock(&pmac_backlight_mutex); 108 } 109 #else 110 static inline void pmac_backlight_unblank(void) { } 111 #endif 112 113 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 114 static int die_owner = -1; 115 static unsigned int die_nest_count; 116 static int die_counter; 117 118 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 119 { 120 int cpu; 121 unsigned long flags; 122 123 if (debugger(regs)) 124 return 1; 125 126 oops_enter(); 127 128 /* racy, but better than risking deadlock. */ 129 raw_local_irq_save(flags); 130 cpu = smp_processor_id(); 131 if (!arch_spin_trylock(&die_lock)) { 132 if (cpu == die_owner) 133 /* nested oops. should stop eventually */; 134 else 135 arch_spin_lock(&die_lock); 136 } 137 die_nest_count++; 138 die_owner = cpu; 139 console_verbose(); 140 bust_spinlocks(1); 141 if (machine_is(powermac)) 142 pmac_backlight_unblank(); 143 return flags; 144 } 145 146 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 147 int signr) 148 { 149 bust_spinlocks(0); 150 die_owner = -1; 151 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 152 die_nest_count--; 153 oops_exit(); 154 printk("\n"); 155 if (!die_nest_count) 156 /* Nest count reaches zero, release the lock. */ 157 arch_spin_unlock(&die_lock); 158 raw_local_irq_restore(flags); 159 160 crash_fadump(regs, "die oops"); 161 162 /* 163 * A system reset (0x100) is a request to dump, so we always send 164 * it through the crashdump code. 165 */ 166 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 167 crash_kexec(regs); 168 169 /* 170 * We aren't the primary crash CPU. We need to send it 171 * to a holding pattern to avoid it ending up in the panic 172 * code. 173 */ 174 crash_kexec_secondary(regs); 175 } 176 177 if (!signr) 178 return; 179 180 /* 181 * While our oops output is serialised by a spinlock, output 182 * from panic() called below can race and corrupt it. If we 183 * know we are going to panic, delay for 1 second so we have a 184 * chance to get clean backtraces from all CPUs that are oopsing. 185 */ 186 if (in_interrupt() || panic_on_oops || !current->pid || 187 is_global_init(current)) { 188 mdelay(MSEC_PER_SEC); 189 } 190 191 if (in_interrupt()) 192 panic("Fatal exception in interrupt"); 193 if (panic_on_oops) 194 panic("Fatal exception"); 195 do_exit(signr); 196 } 197 198 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 199 { 200 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 201 #ifdef CONFIG_PREEMPT 202 printk("PREEMPT "); 203 #endif 204 #ifdef CONFIG_SMP 205 printk("SMP NR_CPUS=%d ", NR_CPUS); 206 #endif 207 #ifdef CONFIG_DEBUG_PAGEALLOC 208 printk("DEBUG_PAGEALLOC "); 209 #endif 210 #ifdef CONFIG_NUMA 211 printk("NUMA "); 212 #endif 213 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 214 215 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 216 return 1; 217 218 print_modules(); 219 show_regs(regs); 220 221 return 0; 222 } 223 224 void die(const char *str, struct pt_regs *regs, long err) 225 { 226 unsigned long flags = oops_begin(regs); 227 228 if (__die(str, regs, err)) 229 err = 0; 230 oops_end(flags, regs, err); 231 } 232 233 void user_single_step_siginfo(struct task_struct *tsk, 234 struct pt_regs *regs, siginfo_t *info) 235 { 236 memset(info, 0, sizeof(*info)); 237 info->si_signo = SIGTRAP; 238 info->si_code = TRAP_TRACE; 239 info->si_addr = (void __user *)regs->nip; 240 } 241 242 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 243 { 244 siginfo_t info; 245 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 246 "at %08lx nip %08lx lr %08lx code %x\n"; 247 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 248 "at %016lx nip %016lx lr %016lx code %x\n"; 249 250 if (!user_mode(regs)) { 251 die("Exception in kernel mode", regs, signr); 252 return; 253 } 254 255 if (show_unhandled_signals && unhandled_signal(current, signr)) { 256 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 257 current->comm, current->pid, signr, 258 addr, regs->nip, regs->link, code); 259 } 260 261 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 262 local_irq_enable(); 263 264 current->thread.trap_nr = code; 265 memset(&info, 0, sizeof(info)); 266 info.si_signo = signr; 267 info.si_code = code; 268 info.si_addr = (void __user *) addr; 269 force_sig_info(signr, &info, current); 270 } 271 272 #ifdef CONFIG_PPC64 273 void system_reset_exception(struct pt_regs *regs) 274 { 275 /* See if any machine dependent calls */ 276 if (ppc_md.system_reset_exception) { 277 if (ppc_md.system_reset_exception(regs)) 278 return; 279 } 280 281 die("System Reset", regs, SIGABRT); 282 283 /* Must die if the interrupt is not recoverable */ 284 if (!(regs->msr & MSR_RI)) 285 panic("Unrecoverable System Reset"); 286 287 /* What should we do here? We could issue a shutdown or hard reset. */ 288 } 289 #endif 290 291 /* 292 * I/O accesses can cause machine checks on powermacs. 293 * Check if the NIP corresponds to the address of a sync 294 * instruction for which there is an entry in the exception 295 * table. 296 * Note that the 601 only takes a machine check on TEA 297 * (transfer error ack) signal assertion, and does not 298 * set any of the top 16 bits of SRR1. 299 * -- paulus. 300 */ 301 static inline int check_io_access(struct pt_regs *regs) 302 { 303 #ifdef CONFIG_PPC32 304 unsigned long msr = regs->msr; 305 const struct exception_table_entry *entry; 306 unsigned int *nip = (unsigned int *)regs->nip; 307 308 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 309 && (entry = search_exception_tables(regs->nip)) != NULL) { 310 /* 311 * Check that it's a sync instruction, or somewhere 312 * in the twi; isync; nop sequence that inb/inw/inl uses. 313 * As the address is in the exception table 314 * we should be able to read the instr there. 315 * For the debug message, we look at the preceding 316 * load or store. 317 */ 318 if (*nip == 0x60000000) /* nop */ 319 nip -= 2; 320 else if (*nip == 0x4c00012c) /* isync */ 321 --nip; 322 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 323 /* sync or twi */ 324 unsigned int rb; 325 326 --nip; 327 rb = (*nip >> 11) & 0x1f; 328 printk(KERN_DEBUG "%s bad port %lx at %p\n", 329 (*nip & 0x100)? "OUT to": "IN from", 330 regs->gpr[rb] - _IO_BASE, nip); 331 regs->msr |= MSR_RI; 332 regs->nip = entry->fixup; 333 return 1; 334 } 335 } 336 #endif /* CONFIG_PPC32 */ 337 return 0; 338 } 339 340 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 341 /* On 4xx, the reason for the machine check or program exception 342 is in the ESR. */ 343 #define get_reason(regs) ((regs)->dsisr) 344 #ifndef CONFIG_FSL_BOOKE 345 #define get_mc_reason(regs) ((regs)->dsisr) 346 #else 347 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 348 #endif 349 #define REASON_FP ESR_FP 350 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 351 #define REASON_PRIVILEGED ESR_PPR 352 #define REASON_TRAP ESR_PTR 353 354 /* single-step stuff */ 355 #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) 356 #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) 357 358 #else 359 /* On non-4xx, the reason for the machine check or program 360 exception is in the MSR. */ 361 #define get_reason(regs) ((regs)->msr) 362 #define get_mc_reason(regs) ((regs)->msr) 363 #define REASON_TM 0x200000 364 #define REASON_FP 0x100000 365 #define REASON_ILLEGAL 0x80000 366 #define REASON_PRIVILEGED 0x40000 367 #define REASON_TRAP 0x20000 368 369 #define single_stepping(regs) ((regs)->msr & MSR_SE) 370 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 371 #endif 372 373 #if defined(CONFIG_4xx) 374 int machine_check_4xx(struct pt_regs *regs) 375 { 376 unsigned long reason = get_mc_reason(regs); 377 378 if (reason & ESR_IMCP) { 379 printk("Instruction"); 380 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 381 } else 382 printk("Data"); 383 printk(" machine check in kernel mode.\n"); 384 385 return 0; 386 } 387 388 int machine_check_440A(struct pt_regs *regs) 389 { 390 unsigned long reason = get_mc_reason(regs); 391 392 printk("Machine check in kernel mode.\n"); 393 if (reason & ESR_IMCP){ 394 printk("Instruction Synchronous Machine Check exception\n"); 395 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 396 } 397 else { 398 u32 mcsr = mfspr(SPRN_MCSR); 399 if (mcsr & MCSR_IB) 400 printk("Instruction Read PLB Error\n"); 401 if (mcsr & MCSR_DRB) 402 printk("Data Read PLB Error\n"); 403 if (mcsr & MCSR_DWB) 404 printk("Data Write PLB Error\n"); 405 if (mcsr & MCSR_TLBP) 406 printk("TLB Parity Error\n"); 407 if (mcsr & MCSR_ICP){ 408 flush_instruction_cache(); 409 printk("I-Cache Parity Error\n"); 410 } 411 if (mcsr & MCSR_DCSP) 412 printk("D-Cache Search Parity Error\n"); 413 if (mcsr & MCSR_DCFP) 414 printk("D-Cache Flush Parity Error\n"); 415 if (mcsr & MCSR_IMPE) 416 printk("Machine Check exception is imprecise\n"); 417 418 /* Clear MCSR */ 419 mtspr(SPRN_MCSR, mcsr); 420 } 421 return 0; 422 } 423 424 int machine_check_47x(struct pt_regs *regs) 425 { 426 unsigned long reason = get_mc_reason(regs); 427 u32 mcsr; 428 429 printk(KERN_ERR "Machine check in kernel mode.\n"); 430 if (reason & ESR_IMCP) { 431 printk(KERN_ERR 432 "Instruction Synchronous Machine Check exception\n"); 433 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 434 return 0; 435 } 436 mcsr = mfspr(SPRN_MCSR); 437 if (mcsr & MCSR_IB) 438 printk(KERN_ERR "Instruction Read PLB Error\n"); 439 if (mcsr & MCSR_DRB) 440 printk(KERN_ERR "Data Read PLB Error\n"); 441 if (mcsr & MCSR_DWB) 442 printk(KERN_ERR "Data Write PLB Error\n"); 443 if (mcsr & MCSR_TLBP) 444 printk(KERN_ERR "TLB Parity Error\n"); 445 if (mcsr & MCSR_ICP) { 446 flush_instruction_cache(); 447 printk(KERN_ERR "I-Cache Parity Error\n"); 448 } 449 if (mcsr & MCSR_DCSP) 450 printk(KERN_ERR "D-Cache Search Parity Error\n"); 451 if (mcsr & PPC47x_MCSR_GPR) 452 printk(KERN_ERR "GPR Parity Error\n"); 453 if (mcsr & PPC47x_MCSR_FPR) 454 printk(KERN_ERR "FPR Parity Error\n"); 455 if (mcsr & PPC47x_MCSR_IPR) 456 printk(KERN_ERR "Machine Check exception is imprecise\n"); 457 458 /* Clear MCSR */ 459 mtspr(SPRN_MCSR, mcsr); 460 461 return 0; 462 } 463 #elif defined(CONFIG_E500) 464 int machine_check_e500mc(struct pt_regs *regs) 465 { 466 unsigned long mcsr = mfspr(SPRN_MCSR); 467 unsigned long reason = mcsr; 468 int recoverable = 1; 469 470 if (reason & MCSR_LD) { 471 recoverable = fsl_rio_mcheck_exception(regs); 472 if (recoverable == 1) 473 goto silent_out; 474 } 475 476 printk("Machine check in kernel mode.\n"); 477 printk("Caused by (from MCSR=%lx): ", reason); 478 479 if (reason & MCSR_MCP) 480 printk("Machine Check Signal\n"); 481 482 if (reason & MCSR_ICPERR) { 483 printk("Instruction Cache Parity Error\n"); 484 485 /* 486 * This is recoverable by invalidating the i-cache. 487 */ 488 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 489 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 490 ; 491 492 /* 493 * This will generally be accompanied by an instruction 494 * fetch error report -- only treat MCSR_IF as fatal 495 * if it wasn't due to an L1 parity error. 496 */ 497 reason &= ~MCSR_IF; 498 } 499 500 if (reason & MCSR_DCPERR_MC) { 501 printk("Data Cache Parity Error\n"); 502 503 /* 504 * In write shadow mode we auto-recover from the error, but it 505 * may still get logged and cause a machine check. We should 506 * only treat the non-write shadow case as non-recoverable. 507 */ 508 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 509 recoverable = 0; 510 } 511 512 if (reason & MCSR_L2MMU_MHIT) { 513 printk("Hit on multiple TLB entries\n"); 514 recoverable = 0; 515 } 516 517 if (reason & MCSR_NMI) 518 printk("Non-maskable interrupt\n"); 519 520 if (reason & MCSR_IF) { 521 printk("Instruction Fetch Error Report\n"); 522 recoverable = 0; 523 } 524 525 if (reason & MCSR_LD) { 526 printk("Load Error Report\n"); 527 recoverable = 0; 528 } 529 530 if (reason & MCSR_ST) { 531 printk("Store Error Report\n"); 532 recoverable = 0; 533 } 534 535 if (reason & MCSR_LDG) { 536 printk("Guarded Load Error Report\n"); 537 recoverable = 0; 538 } 539 540 if (reason & MCSR_TLBSYNC) 541 printk("Simultaneous tlbsync operations\n"); 542 543 if (reason & MCSR_BSL2_ERR) { 544 printk("Level 2 Cache Error\n"); 545 recoverable = 0; 546 } 547 548 if (reason & MCSR_MAV) { 549 u64 addr; 550 551 addr = mfspr(SPRN_MCAR); 552 addr |= (u64)mfspr(SPRN_MCARU) << 32; 553 554 printk("Machine Check %s Address: %#llx\n", 555 reason & MCSR_MEA ? "Effective" : "Physical", addr); 556 } 557 558 silent_out: 559 mtspr(SPRN_MCSR, mcsr); 560 return mfspr(SPRN_MCSR) == 0 && recoverable; 561 } 562 563 int machine_check_e500(struct pt_regs *regs) 564 { 565 unsigned long reason = get_mc_reason(regs); 566 567 if (reason & MCSR_BUS_RBERR) { 568 if (fsl_rio_mcheck_exception(regs)) 569 return 1; 570 } 571 572 printk("Machine check in kernel mode.\n"); 573 printk("Caused by (from MCSR=%lx): ", reason); 574 575 if (reason & MCSR_MCP) 576 printk("Machine Check Signal\n"); 577 if (reason & MCSR_ICPERR) 578 printk("Instruction Cache Parity Error\n"); 579 if (reason & MCSR_DCP_PERR) 580 printk("Data Cache Push Parity Error\n"); 581 if (reason & MCSR_DCPERR) 582 printk("Data Cache Parity Error\n"); 583 if (reason & MCSR_BUS_IAERR) 584 printk("Bus - Instruction Address Error\n"); 585 if (reason & MCSR_BUS_RAERR) 586 printk("Bus - Read Address Error\n"); 587 if (reason & MCSR_BUS_WAERR) 588 printk("Bus - Write Address Error\n"); 589 if (reason & MCSR_BUS_IBERR) 590 printk("Bus - Instruction Data Error\n"); 591 if (reason & MCSR_BUS_RBERR) 592 printk("Bus - Read Data Bus Error\n"); 593 if (reason & MCSR_BUS_WBERR) 594 printk("Bus - Read Data Bus Error\n"); 595 if (reason & MCSR_BUS_IPERR) 596 printk("Bus - Instruction Parity Error\n"); 597 if (reason & MCSR_BUS_RPERR) 598 printk("Bus - Read Parity Error\n"); 599 600 return 0; 601 } 602 603 int machine_check_generic(struct pt_regs *regs) 604 { 605 return 0; 606 } 607 #elif defined(CONFIG_E200) 608 int machine_check_e200(struct pt_regs *regs) 609 { 610 unsigned long reason = get_mc_reason(regs); 611 612 printk("Machine check in kernel mode.\n"); 613 printk("Caused by (from MCSR=%lx): ", reason); 614 615 if (reason & MCSR_MCP) 616 printk("Machine Check Signal\n"); 617 if (reason & MCSR_CP_PERR) 618 printk("Cache Push Parity Error\n"); 619 if (reason & MCSR_CPERR) 620 printk("Cache Parity Error\n"); 621 if (reason & MCSR_EXCP_ERR) 622 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 623 if (reason & MCSR_BUS_IRERR) 624 printk("Bus - Read Bus Error on instruction fetch\n"); 625 if (reason & MCSR_BUS_DRERR) 626 printk("Bus - Read Bus Error on data load\n"); 627 if (reason & MCSR_BUS_WRERR) 628 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 629 630 return 0; 631 } 632 #else 633 int machine_check_generic(struct pt_regs *regs) 634 { 635 unsigned long reason = get_mc_reason(regs); 636 637 printk("Machine check in kernel mode.\n"); 638 printk("Caused by (from SRR1=%lx): ", reason); 639 switch (reason & 0x601F0000) { 640 case 0x80000: 641 printk("Machine check signal\n"); 642 break; 643 case 0: /* for 601 */ 644 case 0x40000: 645 case 0x140000: /* 7450 MSS error and TEA */ 646 printk("Transfer error ack signal\n"); 647 break; 648 case 0x20000: 649 printk("Data parity error signal\n"); 650 break; 651 case 0x10000: 652 printk("Address parity error signal\n"); 653 break; 654 case 0x20000000: 655 printk("L1 Data Cache error\n"); 656 break; 657 case 0x40000000: 658 printk("L1 Instruction Cache error\n"); 659 break; 660 case 0x00100000: 661 printk("L2 data cache parity error\n"); 662 break; 663 default: 664 printk("Unknown values in msr\n"); 665 } 666 return 0; 667 } 668 #endif /* everything else */ 669 670 void machine_check_exception(struct pt_regs *regs) 671 { 672 enum ctx_state prev_state = exception_enter(); 673 int recover = 0; 674 675 __get_cpu_var(irq_stat).mce_exceptions++; 676 677 /* See if any machine dependent calls. In theory, we would want 678 * to call the CPU first, and call the ppc_md. one if the CPU 679 * one returns a positive number. However there is existing code 680 * that assumes the board gets a first chance, so let's keep it 681 * that way for now and fix things later. --BenH. 682 */ 683 if (ppc_md.machine_check_exception) 684 recover = ppc_md.machine_check_exception(regs); 685 else if (cur_cpu_spec->machine_check) 686 recover = cur_cpu_spec->machine_check(regs); 687 688 if (recover > 0) 689 goto bail; 690 691 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 692 /* the qspan pci read routines can cause machine checks -- Cort 693 * 694 * yuck !!! that totally needs to go away ! There are better ways 695 * to deal with that than having a wart in the mcheck handler. 696 * -- BenH 697 */ 698 bad_page_fault(regs, regs->dar, SIGBUS); 699 goto bail; 700 #endif 701 702 if (debugger_fault_handler(regs)) 703 goto bail; 704 705 if (check_io_access(regs)) 706 goto bail; 707 708 die("Machine check", regs, SIGBUS); 709 710 /* Must die if the interrupt is not recoverable */ 711 if (!(regs->msr & MSR_RI)) 712 panic("Unrecoverable Machine check"); 713 714 bail: 715 exception_exit(prev_state); 716 } 717 718 void SMIException(struct pt_regs *regs) 719 { 720 die("System Management Interrupt", regs, SIGABRT); 721 } 722 723 void unknown_exception(struct pt_regs *regs) 724 { 725 enum ctx_state prev_state = exception_enter(); 726 727 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 728 regs->nip, regs->msr, regs->trap); 729 730 _exception(SIGTRAP, regs, 0, 0); 731 732 exception_exit(prev_state); 733 } 734 735 void instruction_breakpoint_exception(struct pt_regs *regs) 736 { 737 enum ctx_state prev_state = exception_enter(); 738 739 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 740 5, SIGTRAP) == NOTIFY_STOP) 741 goto bail; 742 if (debugger_iabr_match(regs)) 743 goto bail; 744 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 745 746 bail: 747 exception_exit(prev_state); 748 } 749 750 void RunModeException(struct pt_regs *regs) 751 { 752 _exception(SIGTRAP, regs, 0, 0); 753 } 754 755 void __kprobes single_step_exception(struct pt_regs *regs) 756 { 757 enum ctx_state prev_state = exception_enter(); 758 759 clear_single_step(regs); 760 761 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 762 5, SIGTRAP) == NOTIFY_STOP) 763 goto bail; 764 if (debugger_sstep(regs)) 765 goto bail; 766 767 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 768 769 bail: 770 exception_exit(prev_state); 771 } 772 773 /* 774 * After we have successfully emulated an instruction, we have to 775 * check if the instruction was being single-stepped, and if so, 776 * pretend we got a single-step exception. This was pointed out 777 * by Kumar Gala. -- paulus 778 */ 779 static void emulate_single_step(struct pt_regs *regs) 780 { 781 if (single_stepping(regs)) 782 single_step_exception(regs); 783 } 784 785 static inline int __parse_fpscr(unsigned long fpscr) 786 { 787 int ret = 0; 788 789 /* Invalid operation */ 790 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 791 ret = FPE_FLTINV; 792 793 /* Overflow */ 794 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 795 ret = FPE_FLTOVF; 796 797 /* Underflow */ 798 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 799 ret = FPE_FLTUND; 800 801 /* Divide by zero */ 802 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 803 ret = FPE_FLTDIV; 804 805 /* Inexact result */ 806 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 807 ret = FPE_FLTRES; 808 809 return ret; 810 } 811 812 static void parse_fpe(struct pt_regs *regs) 813 { 814 int code = 0; 815 816 flush_fp_to_thread(current); 817 818 code = __parse_fpscr(current->thread.fpscr.val); 819 820 _exception(SIGFPE, regs, code, regs->nip); 821 } 822 823 /* 824 * Illegal instruction emulation support. Originally written to 825 * provide the PVR to user applications using the mfspr rd, PVR. 826 * Return non-zero if we can't emulate, or -EFAULT if the associated 827 * memory access caused an access fault. Return zero on success. 828 * 829 * There are a couple of ways to do this, either "decode" the instruction 830 * or directly match lots of bits. In this case, matching lots of 831 * bits is faster and easier. 832 * 833 */ 834 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 835 { 836 u8 rT = (instword >> 21) & 0x1f; 837 u8 rA = (instword >> 16) & 0x1f; 838 u8 NB_RB = (instword >> 11) & 0x1f; 839 u32 num_bytes; 840 unsigned long EA; 841 int pos = 0; 842 843 /* Early out if we are an invalid form of lswx */ 844 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 845 if ((rT == rA) || (rT == NB_RB)) 846 return -EINVAL; 847 848 EA = (rA == 0) ? 0 : regs->gpr[rA]; 849 850 switch (instword & PPC_INST_STRING_MASK) { 851 case PPC_INST_LSWX: 852 case PPC_INST_STSWX: 853 EA += NB_RB; 854 num_bytes = regs->xer & 0x7f; 855 break; 856 case PPC_INST_LSWI: 857 case PPC_INST_STSWI: 858 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 859 break; 860 default: 861 return -EINVAL; 862 } 863 864 while (num_bytes != 0) 865 { 866 u8 val; 867 u32 shift = 8 * (3 - (pos & 0x3)); 868 869 /* if process is 32-bit, clear upper 32 bits of EA */ 870 if ((regs->msr & MSR_64BIT) == 0) 871 EA &= 0xFFFFFFFF; 872 873 switch ((instword & PPC_INST_STRING_MASK)) { 874 case PPC_INST_LSWX: 875 case PPC_INST_LSWI: 876 if (get_user(val, (u8 __user *)EA)) 877 return -EFAULT; 878 /* first time updating this reg, 879 * zero it out */ 880 if (pos == 0) 881 regs->gpr[rT] = 0; 882 regs->gpr[rT] |= val << shift; 883 break; 884 case PPC_INST_STSWI: 885 case PPC_INST_STSWX: 886 val = regs->gpr[rT] >> shift; 887 if (put_user(val, (u8 __user *)EA)) 888 return -EFAULT; 889 break; 890 } 891 /* move EA to next address */ 892 EA += 1; 893 num_bytes--; 894 895 /* manage our position within the register */ 896 if (++pos == 4) { 897 pos = 0; 898 if (++rT == 32) 899 rT = 0; 900 } 901 } 902 903 return 0; 904 } 905 906 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 907 { 908 u32 ra,rs; 909 unsigned long tmp; 910 911 ra = (instword >> 16) & 0x1f; 912 rs = (instword >> 21) & 0x1f; 913 914 tmp = regs->gpr[rs]; 915 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 916 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 917 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 918 regs->gpr[ra] = tmp; 919 920 return 0; 921 } 922 923 static int emulate_isel(struct pt_regs *regs, u32 instword) 924 { 925 u8 rT = (instword >> 21) & 0x1f; 926 u8 rA = (instword >> 16) & 0x1f; 927 u8 rB = (instword >> 11) & 0x1f; 928 u8 BC = (instword >> 6) & 0x1f; 929 u8 bit; 930 unsigned long tmp; 931 932 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 933 bit = (regs->ccr >> (31 - BC)) & 0x1; 934 935 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 936 937 return 0; 938 } 939 940 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 941 static inline bool tm_abort_check(struct pt_regs *regs, int cause) 942 { 943 /* If we're emulating a load/store in an active transaction, we cannot 944 * emulate it as the kernel operates in transaction suspended context. 945 * We need to abort the transaction. This creates a persistent TM 946 * abort so tell the user what caused it with a new code. 947 */ 948 if (MSR_TM_TRANSACTIONAL(regs->msr)) { 949 tm_enable(); 950 tm_abort(cause); 951 return true; 952 } 953 return false; 954 } 955 #else 956 static inline bool tm_abort_check(struct pt_regs *regs, int reason) 957 { 958 return false; 959 } 960 #endif 961 962 static int emulate_instruction(struct pt_regs *regs) 963 { 964 u32 instword; 965 u32 rd; 966 967 if (!user_mode(regs) || (regs->msr & MSR_LE)) 968 return -EINVAL; 969 CHECK_FULL_REGS(regs); 970 971 if (get_user(instword, (u32 __user *)(regs->nip))) 972 return -EFAULT; 973 974 /* Emulate the mfspr rD, PVR. */ 975 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 976 PPC_WARN_EMULATED(mfpvr, regs); 977 rd = (instword >> 21) & 0x1f; 978 regs->gpr[rd] = mfspr(SPRN_PVR); 979 return 0; 980 } 981 982 /* Emulating the dcba insn is just a no-op. */ 983 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 984 PPC_WARN_EMULATED(dcba, regs); 985 return 0; 986 } 987 988 /* Emulate the mcrxr insn. */ 989 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 990 int shift = (instword >> 21) & 0x1c; 991 unsigned long msk = 0xf0000000UL >> shift; 992 993 PPC_WARN_EMULATED(mcrxr, regs); 994 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 995 regs->xer &= ~0xf0000000UL; 996 return 0; 997 } 998 999 /* Emulate load/store string insn. */ 1000 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 1001 if (tm_abort_check(regs, 1002 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) 1003 return -EINVAL; 1004 PPC_WARN_EMULATED(string, regs); 1005 return emulate_string_inst(regs, instword); 1006 } 1007 1008 /* Emulate the popcntb (Population Count Bytes) instruction. */ 1009 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 1010 PPC_WARN_EMULATED(popcntb, regs); 1011 return emulate_popcntb_inst(regs, instword); 1012 } 1013 1014 /* Emulate isel (Integer Select) instruction */ 1015 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 1016 PPC_WARN_EMULATED(isel, regs); 1017 return emulate_isel(regs, instword); 1018 } 1019 1020 #ifdef CONFIG_PPC64 1021 /* Emulate the mfspr rD, DSCR. */ 1022 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == 1023 PPC_INST_MFSPR_DSCR_USER) || 1024 ((instword & PPC_INST_MFSPR_DSCR_MASK) == 1025 PPC_INST_MFSPR_DSCR)) && 1026 cpu_has_feature(CPU_FTR_DSCR)) { 1027 PPC_WARN_EMULATED(mfdscr, regs); 1028 rd = (instword >> 21) & 0x1f; 1029 regs->gpr[rd] = mfspr(SPRN_DSCR); 1030 return 0; 1031 } 1032 /* Emulate the mtspr DSCR, rD. */ 1033 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) == 1034 PPC_INST_MTSPR_DSCR_USER) || 1035 ((instword & PPC_INST_MTSPR_DSCR_MASK) == 1036 PPC_INST_MTSPR_DSCR)) && 1037 cpu_has_feature(CPU_FTR_DSCR)) { 1038 PPC_WARN_EMULATED(mtdscr, regs); 1039 rd = (instword >> 21) & 0x1f; 1040 current->thread.dscr = regs->gpr[rd]; 1041 current->thread.dscr_inherit = 1; 1042 mtspr(SPRN_DSCR, current->thread.dscr); 1043 return 0; 1044 } 1045 #endif 1046 1047 return -EINVAL; 1048 } 1049 1050 int is_valid_bugaddr(unsigned long addr) 1051 { 1052 return is_kernel_addr(addr); 1053 } 1054 1055 void __kprobes program_check_exception(struct pt_regs *regs) 1056 { 1057 enum ctx_state prev_state = exception_enter(); 1058 unsigned int reason = get_reason(regs); 1059 extern int do_mathemu(struct pt_regs *regs); 1060 1061 /* We can now get here via a FP Unavailable exception if the core 1062 * has no FPU, in that case the reason flags will be 0 */ 1063 1064 if (reason & REASON_FP) { 1065 /* IEEE FP exception */ 1066 parse_fpe(regs); 1067 goto bail; 1068 } 1069 if (reason & REASON_TRAP) { 1070 /* Debugger is first in line to stop recursive faults in 1071 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1072 if (debugger_bpt(regs)) 1073 goto bail; 1074 1075 /* trap exception */ 1076 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1077 == NOTIFY_STOP) 1078 goto bail; 1079 1080 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1081 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1082 regs->nip += 4; 1083 goto bail; 1084 } 1085 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1086 goto bail; 1087 } 1088 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1089 if (reason & REASON_TM) { 1090 /* This is a TM "Bad Thing Exception" program check. 1091 * This occurs when: 1092 * - An rfid/hrfid/mtmsrd attempts to cause an illegal 1093 * transition in TM states. 1094 * - A trechkpt is attempted when transactional. 1095 * - A treclaim is attempted when non transactional. 1096 * - A tend is illegally attempted. 1097 * - writing a TM SPR when transactional. 1098 */ 1099 if (!user_mode(regs) && 1100 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1101 regs->nip += 4; 1102 goto bail; 1103 } 1104 /* If usermode caused this, it's done something illegal and 1105 * gets a SIGILL slap on the wrist. We call it an illegal 1106 * operand to distinguish from the instruction just being bad 1107 * (e.g. executing a 'tend' on a CPU without TM!); it's an 1108 * illegal /placement/ of a valid instruction. 1109 */ 1110 if (user_mode(regs)) { 1111 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1112 goto bail; 1113 } else { 1114 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1115 "at %lx (msr 0x%x)\n", regs->nip, reason); 1116 die("Unrecoverable exception", regs, SIGABRT); 1117 } 1118 } 1119 #endif 1120 1121 /* We restore the interrupt state now */ 1122 if (!arch_irq_disabled_regs(regs)) 1123 local_irq_enable(); 1124 1125 #ifdef CONFIG_MATH_EMULATION 1126 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1127 * but there seems to be a hardware bug on the 405GP (RevD) 1128 * that means ESR is sometimes set incorrectly - either to 1129 * ESR_DST (!?) or 0. In the process of chasing this with the 1130 * hardware people - not sure if it can happen on any illegal 1131 * instruction or only on FP instructions, whether there is a 1132 * pattern to occurrences etc. -dgibson 31/Mar/2003 1133 */ 1134 1135 /* 1136 * If we support a HW FPU, we need to ensure the FP state 1137 * if flushed into the thread_struct before attempting 1138 * emulation 1139 */ 1140 #ifdef CONFIG_PPC_FPU 1141 flush_fp_to_thread(current); 1142 #endif 1143 switch (do_mathemu(regs)) { 1144 case 0: 1145 emulate_single_step(regs); 1146 goto bail; 1147 case 1: { 1148 int code = 0; 1149 code = __parse_fpscr(current->thread.fpscr.val); 1150 _exception(SIGFPE, regs, code, regs->nip); 1151 goto bail; 1152 } 1153 case -EFAULT: 1154 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1155 goto bail; 1156 } 1157 /* fall through on any other errors */ 1158 #endif /* CONFIG_MATH_EMULATION */ 1159 1160 /* Try to emulate it if we should. */ 1161 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1162 switch (emulate_instruction(regs)) { 1163 case 0: 1164 regs->nip += 4; 1165 emulate_single_step(regs); 1166 goto bail; 1167 case -EFAULT: 1168 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1169 goto bail; 1170 } 1171 } 1172 1173 if (reason & REASON_PRIVILEGED) 1174 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1175 else 1176 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1177 1178 bail: 1179 exception_exit(prev_state); 1180 } 1181 1182 /* 1183 * This occurs when running in hypervisor mode on POWER6 or later 1184 * and an illegal instruction is encountered. 1185 */ 1186 void __kprobes emulation_assist_interrupt(struct pt_regs *regs) 1187 { 1188 regs->msr |= REASON_ILLEGAL; 1189 program_check_exception(regs); 1190 } 1191 1192 void alignment_exception(struct pt_regs *regs) 1193 { 1194 enum ctx_state prev_state = exception_enter(); 1195 int sig, code, fixed = 0; 1196 1197 /* We restore the interrupt state now */ 1198 if (!arch_irq_disabled_regs(regs)) 1199 local_irq_enable(); 1200 1201 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) 1202 goto bail; 1203 1204 /* we don't implement logging of alignment exceptions */ 1205 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1206 fixed = fix_alignment(regs); 1207 1208 if (fixed == 1) { 1209 regs->nip += 4; /* skip over emulated instruction */ 1210 emulate_single_step(regs); 1211 goto bail; 1212 } 1213 1214 /* Operand address was bad */ 1215 if (fixed == -EFAULT) { 1216 sig = SIGSEGV; 1217 code = SEGV_ACCERR; 1218 } else { 1219 sig = SIGBUS; 1220 code = BUS_ADRALN; 1221 } 1222 if (user_mode(regs)) 1223 _exception(sig, regs, code, regs->dar); 1224 else 1225 bad_page_fault(regs, regs->dar, sig); 1226 1227 bail: 1228 exception_exit(prev_state); 1229 } 1230 1231 void StackOverflow(struct pt_regs *regs) 1232 { 1233 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1234 current, regs->gpr[1]); 1235 debugger(regs); 1236 show_regs(regs); 1237 panic("kernel stack overflow"); 1238 } 1239 1240 void nonrecoverable_exception(struct pt_regs *regs) 1241 { 1242 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1243 regs->nip, regs->msr); 1244 debugger(regs); 1245 die("nonrecoverable exception", regs, SIGKILL); 1246 } 1247 1248 void trace_syscall(struct pt_regs *regs) 1249 { 1250 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1251 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1252 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1253 } 1254 1255 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1256 { 1257 enum ctx_state prev_state = exception_enter(); 1258 1259 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1260 "%lx at %lx\n", regs->trap, regs->nip); 1261 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1262 1263 exception_exit(prev_state); 1264 } 1265 1266 void altivec_unavailable_exception(struct pt_regs *regs) 1267 { 1268 enum ctx_state prev_state = exception_enter(); 1269 1270 if (user_mode(regs)) { 1271 /* A user program has executed an altivec instruction, 1272 but this kernel doesn't support altivec. */ 1273 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1274 goto bail; 1275 } 1276 1277 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1278 "%lx at %lx\n", regs->trap, regs->nip); 1279 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1280 1281 bail: 1282 exception_exit(prev_state); 1283 } 1284 1285 void vsx_unavailable_exception(struct pt_regs *regs) 1286 { 1287 if (user_mode(regs)) { 1288 /* A user program has executed an vsx instruction, 1289 but this kernel doesn't support vsx. */ 1290 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1291 return; 1292 } 1293 1294 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1295 "%lx at %lx\n", regs->trap, regs->nip); 1296 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1297 } 1298 1299 void facility_unavailable_exception(struct pt_regs *regs) 1300 { 1301 static char *facility_strings[] = { 1302 "FPU", 1303 "VMX/VSX", 1304 "DSCR", 1305 "PMU SPRs", 1306 "BHRB", 1307 "TM", 1308 "AT", 1309 "EBB", 1310 "TAR", 1311 }; 1312 char *facility, *prefix; 1313 u64 value; 1314 1315 if (regs->trap == 0xf60) { 1316 value = mfspr(SPRN_FSCR); 1317 prefix = ""; 1318 } else { 1319 value = mfspr(SPRN_HFSCR); 1320 prefix = "Hypervisor "; 1321 } 1322 1323 value = value >> 56; 1324 1325 /* We restore the interrupt state now */ 1326 if (!arch_irq_disabled_regs(regs)) 1327 local_irq_enable(); 1328 1329 if (value < ARRAY_SIZE(facility_strings)) 1330 facility = facility_strings[value]; 1331 else 1332 facility = "unknown"; 1333 1334 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1335 prefix, facility, regs->nip, regs->msr); 1336 1337 if (user_mode(regs)) { 1338 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1339 return; 1340 } 1341 1342 die("Unexpected facility unavailable exception", regs, SIGABRT); 1343 } 1344 1345 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1346 1347 extern void do_load_up_fpu(struct pt_regs *regs); 1348 1349 void fp_unavailable_tm(struct pt_regs *regs) 1350 { 1351 /* Note: This does not handle any kind of FP laziness. */ 1352 1353 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", 1354 regs->nip, regs->msr); 1355 tm_enable(); 1356 1357 /* We can only have got here if the task started using FP after 1358 * beginning the transaction. So, the transactional regs are just a 1359 * copy of the checkpointed ones. But, we still need to recheckpoint 1360 * as we're enabling FP for the process; it will return, abort the 1361 * transaction, and probably retry but now with FP enabled. So the 1362 * checkpointed FP registers need to be loaded. 1363 */ 1364 tm_reclaim(¤t->thread, current->thread.regs->msr, 1365 TM_CAUSE_FAC_UNAV); 1366 /* Reclaim didn't save out any FPRs to transact_fprs. */ 1367 1368 /* Enable FP for the task: */ 1369 regs->msr |= (MSR_FP | current->thread.fpexc_mode); 1370 1371 /* This loads and recheckpoints the FP registers from 1372 * thread.fpr[]. They will remain in registers after the 1373 * checkpoint so we don't need to reload them after. 1374 */ 1375 tm_recheckpoint(¤t->thread, regs->msr); 1376 } 1377 1378 #ifdef CONFIG_ALTIVEC 1379 extern void do_load_up_altivec(struct pt_regs *regs); 1380 1381 void altivec_unavailable_tm(struct pt_regs *regs) 1382 { 1383 /* See the comments in fp_unavailable_tm(). This function operates 1384 * the same way. 1385 */ 1386 1387 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," 1388 "MSR=%lx\n", 1389 regs->nip, regs->msr); 1390 tm_enable(); 1391 tm_reclaim(¤t->thread, current->thread.regs->msr, 1392 TM_CAUSE_FAC_UNAV); 1393 regs->msr |= MSR_VEC; 1394 tm_recheckpoint(¤t->thread, regs->msr); 1395 current->thread.used_vr = 1; 1396 } 1397 #endif 1398 1399 #ifdef CONFIG_VSX 1400 void vsx_unavailable_tm(struct pt_regs *regs) 1401 { 1402 /* See the comments in fp_unavailable_tm(). This works similarly, 1403 * though we're loading both FP and VEC registers in here. 1404 * 1405 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC 1406 * regs. Either way, set MSR_VSX. 1407 */ 1408 1409 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx," 1410 "MSR=%lx\n", 1411 regs->nip, regs->msr); 1412 1413 tm_enable(); 1414 /* This reclaims FP and/or VR regs if they're already enabled */ 1415 tm_reclaim(¤t->thread, current->thread.regs->msr, 1416 TM_CAUSE_FAC_UNAV); 1417 1418 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | 1419 MSR_VSX; 1420 /* This loads & recheckpoints FP and VRs. */ 1421 tm_recheckpoint(¤t->thread, regs->msr); 1422 current->thread.used_vsr = 1; 1423 } 1424 #endif 1425 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1426 1427 void performance_monitor_exception(struct pt_regs *regs) 1428 { 1429 __get_cpu_var(irq_stat).pmu_irqs++; 1430 1431 perf_irq(regs); 1432 } 1433 1434 #ifdef CONFIG_8xx 1435 void SoftwareEmulation(struct pt_regs *regs) 1436 { 1437 extern int do_mathemu(struct pt_regs *); 1438 #if defined(CONFIG_MATH_EMULATION) 1439 int errcode; 1440 #endif 1441 1442 CHECK_FULL_REGS(regs); 1443 1444 if (!user_mode(regs)) { 1445 debugger(regs); 1446 die("Kernel Mode Software FPU Emulation", regs, SIGFPE); 1447 } 1448 1449 #ifdef CONFIG_MATH_EMULATION 1450 errcode = do_mathemu(regs); 1451 if (errcode >= 0) 1452 PPC_WARN_EMULATED(math, regs); 1453 1454 switch (errcode) { 1455 case 0: 1456 emulate_single_step(regs); 1457 return; 1458 case 1: { 1459 int code = 0; 1460 code = __parse_fpscr(current->thread.fpscr.val); 1461 _exception(SIGFPE, regs, code, regs->nip); 1462 return; 1463 } 1464 case -EFAULT: 1465 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1466 return; 1467 default: 1468 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1469 return; 1470 } 1471 #else 1472 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1473 #endif 1474 } 1475 #endif /* CONFIG_8xx */ 1476 1477 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1478 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1479 { 1480 int changed = 0; 1481 /* 1482 * Determine the cause of the debug event, clear the 1483 * event flags and send a trap to the handler. Torez 1484 */ 1485 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1486 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1487 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1488 current->thread.dbcr2 &= ~DBCR2_DAC12MODE; 1489 #endif 1490 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1491 5); 1492 changed |= 0x01; 1493 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1494 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1495 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1496 6); 1497 changed |= 0x01; 1498 } else if (debug_status & DBSR_IAC1) { 1499 current->thread.dbcr0 &= ~DBCR0_IAC1; 1500 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1501 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1502 1); 1503 changed |= 0x01; 1504 } else if (debug_status & DBSR_IAC2) { 1505 current->thread.dbcr0 &= ~DBCR0_IAC2; 1506 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1507 2); 1508 changed |= 0x01; 1509 } else if (debug_status & DBSR_IAC3) { 1510 current->thread.dbcr0 &= ~DBCR0_IAC3; 1511 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1512 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1513 3); 1514 changed |= 0x01; 1515 } else if (debug_status & DBSR_IAC4) { 1516 current->thread.dbcr0 &= ~DBCR0_IAC4; 1517 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1518 4); 1519 changed |= 0x01; 1520 } 1521 /* 1522 * At the point this routine was called, the MSR(DE) was turned off. 1523 * Check all other debug flags and see if that bit needs to be turned 1524 * back on or not. 1525 */ 1526 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) 1527 regs->msr |= MSR_DE; 1528 else 1529 /* Make sure the IDM flag is off */ 1530 current->thread.dbcr0 &= ~DBCR0_IDM; 1531 1532 if (changed & 0x01) 1533 mtspr(SPRN_DBCR0, current->thread.dbcr0); 1534 } 1535 1536 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1537 { 1538 current->thread.dbsr = debug_status; 1539 1540 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1541 * on server, it stops on the target of the branch. In order to simulate 1542 * the server behaviour, we thus restart right away with a single step 1543 * instead of stopping here when hitting a BT 1544 */ 1545 if (debug_status & DBSR_BT) { 1546 regs->msr &= ~MSR_DE; 1547 1548 /* Disable BT */ 1549 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1550 /* Clear the BT event */ 1551 mtspr(SPRN_DBSR, DBSR_BT); 1552 1553 /* Do the single step trick only when coming from userspace */ 1554 if (user_mode(regs)) { 1555 current->thread.dbcr0 &= ~DBCR0_BT; 1556 current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1557 regs->msr |= MSR_DE; 1558 return; 1559 } 1560 1561 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1562 5, SIGTRAP) == NOTIFY_STOP) { 1563 return; 1564 } 1565 if (debugger_sstep(regs)) 1566 return; 1567 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1568 regs->msr &= ~MSR_DE; 1569 1570 /* Disable instruction completion */ 1571 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1572 /* Clear the instruction completion event */ 1573 mtspr(SPRN_DBSR, DBSR_IC); 1574 1575 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1576 5, SIGTRAP) == NOTIFY_STOP) { 1577 return; 1578 } 1579 1580 if (debugger_sstep(regs)) 1581 return; 1582 1583 if (user_mode(regs)) { 1584 current->thread.dbcr0 &= ~DBCR0_IC; 1585 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, 1586 current->thread.dbcr1)) 1587 regs->msr |= MSR_DE; 1588 else 1589 /* Make sure the IDM bit is off */ 1590 current->thread.dbcr0 &= ~DBCR0_IDM; 1591 } 1592 1593 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1594 } else 1595 handle_debug(regs, debug_status); 1596 } 1597 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1598 1599 #if !defined(CONFIG_TAU_INT) 1600 void TAUException(struct pt_regs *regs) 1601 { 1602 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1603 regs->nip, regs->msr, regs->trap, print_tainted()); 1604 } 1605 #endif /* CONFIG_INT_TAU */ 1606 1607 #ifdef CONFIG_ALTIVEC 1608 void altivec_assist_exception(struct pt_regs *regs) 1609 { 1610 int err; 1611 1612 if (!user_mode(regs)) { 1613 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1614 " at %lx\n", regs->nip); 1615 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1616 } 1617 1618 flush_altivec_to_thread(current); 1619 1620 PPC_WARN_EMULATED(altivec, regs); 1621 err = emulate_altivec(regs); 1622 if (err == 0) { 1623 regs->nip += 4; /* skip emulated instruction */ 1624 emulate_single_step(regs); 1625 return; 1626 } 1627 1628 if (err == -EFAULT) { 1629 /* got an error reading the instruction */ 1630 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1631 } else { 1632 /* didn't recognize the instruction */ 1633 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1634 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1635 "in %s at %lx\n", current->comm, regs->nip); 1636 current->thread.vscr.u[3] |= 0x10000; 1637 } 1638 } 1639 #endif /* CONFIG_ALTIVEC */ 1640 1641 #ifdef CONFIG_VSX 1642 void vsx_assist_exception(struct pt_regs *regs) 1643 { 1644 if (!user_mode(regs)) { 1645 printk(KERN_EMERG "VSX assist exception in kernel mode" 1646 " at %lx\n", regs->nip); 1647 die("Kernel VSX assist exception", regs, SIGILL); 1648 } 1649 1650 flush_vsx_to_thread(current); 1651 printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); 1652 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1653 } 1654 #endif /* CONFIG_VSX */ 1655 1656 #ifdef CONFIG_FSL_BOOKE 1657 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1658 unsigned long error_code) 1659 { 1660 /* We treat cache locking instructions from the user 1661 * as priv ops, in the future we could try to do 1662 * something smarter 1663 */ 1664 if (error_code & (ESR_DLK|ESR_ILK)) 1665 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1666 return; 1667 } 1668 #endif /* CONFIG_FSL_BOOKE */ 1669 1670 #ifdef CONFIG_SPE 1671 void SPEFloatingPointException(struct pt_regs *regs) 1672 { 1673 extern int do_spe_mathemu(struct pt_regs *regs); 1674 unsigned long spefscr; 1675 int fpexc_mode; 1676 int code = 0; 1677 int err; 1678 1679 flush_spe_to_thread(current); 1680 1681 spefscr = current->thread.spefscr; 1682 fpexc_mode = current->thread.fpexc_mode; 1683 1684 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1685 code = FPE_FLTOVF; 1686 } 1687 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1688 code = FPE_FLTUND; 1689 } 1690 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1691 code = FPE_FLTDIV; 1692 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1693 code = FPE_FLTINV; 1694 } 1695 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1696 code = FPE_FLTRES; 1697 1698 err = do_spe_mathemu(regs); 1699 if (err == 0) { 1700 regs->nip += 4; /* skip emulated instruction */ 1701 emulate_single_step(regs); 1702 return; 1703 } 1704 1705 if (err == -EFAULT) { 1706 /* got an error reading the instruction */ 1707 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1708 } else if (err == -EINVAL) { 1709 /* didn't recognize the instruction */ 1710 printk(KERN_ERR "unrecognized spe instruction " 1711 "in %s at %lx\n", current->comm, regs->nip); 1712 } else { 1713 _exception(SIGFPE, regs, code, regs->nip); 1714 } 1715 1716 return; 1717 } 1718 1719 void SPEFloatingPointRoundException(struct pt_regs *regs) 1720 { 1721 extern int speround_handler(struct pt_regs *regs); 1722 int err; 1723 1724 preempt_disable(); 1725 if (regs->msr & MSR_SPE) 1726 giveup_spe(current); 1727 preempt_enable(); 1728 1729 regs->nip -= 4; 1730 err = speround_handler(regs); 1731 if (err == 0) { 1732 regs->nip += 4; /* skip emulated instruction */ 1733 emulate_single_step(regs); 1734 return; 1735 } 1736 1737 if (err == -EFAULT) { 1738 /* got an error reading the instruction */ 1739 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1740 } else if (err == -EINVAL) { 1741 /* didn't recognize the instruction */ 1742 printk(KERN_ERR "unrecognized spe instruction " 1743 "in %s at %lx\n", current->comm, regs->nip); 1744 } else { 1745 _exception(SIGFPE, regs, 0, regs->nip); 1746 return; 1747 } 1748 } 1749 #endif 1750 1751 /* 1752 * We enter here if we get an unrecoverable exception, that is, one 1753 * that happened at a point where the RI (recoverable interrupt) bit 1754 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1755 * we therefore lost state by taking this exception. 1756 */ 1757 void unrecoverable_exception(struct pt_regs *regs) 1758 { 1759 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1760 regs->trap, regs->nip); 1761 die("Unrecoverable exception", regs, SIGABRT); 1762 } 1763 1764 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x) 1765 /* 1766 * Default handler for a Watchdog exception, 1767 * spins until a reboot occurs 1768 */ 1769 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1770 { 1771 /* Generic WatchdogHandler, implement your own */ 1772 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1773 return; 1774 } 1775 1776 void WatchdogException(struct pt_regs *regs) 1777 { 1778 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1779 WatchdogHandler(regs); 1780 } 1781 #endif 1782 1783 /* 1784 * We enter here if we discover during exception entry that we are 1785 * running in supervisor mode with a userspace value in the stack pointer. 1786 */ 1787 void kernel_bad_stack(struct pt_regs *regs) 1788 { 1789 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1790 regs->gpr[1], regs->nip); 1791 die("Bad kernel stack pointer", regs, SIGABRT); 1792 } 1793 1794 void __init trap_init(void) 1795 { 1796 } 1797 1798 1799 #ifdef CONFIG_PPC_EMULATED_STATS 1800 1801 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1802 1803 struct ppc_emulated ppc_emulated = { 1804 #ifdef CONFIG_ALTIVEC 1805 WARN_EMULATED_SETUP(altivec), 1806 #endif 1807 WARN_EMULATED_SETUP(dcba), 1808 WARN_EMULATED_SETUP(dcbz), 1809 WARN_EMULATED_SETUP(fp_pair), 1810 WARN_EMULATED_SETUP(isel), 1811 WARN_EMULATED_SETUP(mcrxr), 1812 WARN_EMULATED_SETUP(mfpvr), 1813 WARN_EMULATED_SETUP(multiple), 1814 WARN_EMULATED_SETUP(popcntb), 1815 WARN_EMULATED_SETUP(spe), 1816 WARN_EMULATED_SETUP(string), 1817 WARN_EMULATED_SETUP(unaligned), 1818 #ifdef CONFIG_MATH_EMULATION 1819 WARN_EMULATED_SETUP(math), 1820 #endif 1821 #ifdef CONFIG_VSX 1822 WARN_EMULATED_SETUP(vsx), 1823 #endif 1824 #ifdef CONFIG_PPC64 1825 WARN_EMULATED_SETUP(mfdscr), 1826 WARN_EMULATED_SETUP(mtdscr), 1827 #endif 1828 }; 1829 1830 u32 ppc_warn_emulated; 1831 1832 void ppc_warn_emulated_print(const char *type) 1833 { 1834 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1835 type); 1836 } 1837 1838 static int __init ppc_warn_emulated_init(void) 1839 { 1840 struct dentry *dir, *d; 1841 unsigned int i; 1842 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1843 1844 if (!powerpc_debugfs_root) 1845 return -ENODEV; 1846 1847 dir = debugfs_create_dir("emulated_instructions", 1848 powerpc_debugfs_root); 1849 if (!dir) 1850 return -ENOMEM; 1851 1852 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1853 &ppc_warn_emulated); 1854 if (!d) 1855 goto fail; 1856 1857 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1858 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1859 (u32 *)&entries[i].val.counter); 1860 if (!d) 1861 goto fail; 1862 } 1863 1864 return 0; 1865 1866 fail: 1867 debugfs_remove_recursive(dir); 1868 return -ENOMEM; 1869 } 1870 1871 device_initcall(ppc_warn_emulated_init); 1872 1873 #endif /* CONFIG_PPC_EMULATED_STATS */ 1874