1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 #include <linux/ratelimit.h> 38 39 #include <asm/emulated_ops.h> 40 #include <asm/pgtable.h> 41 #include <asm/uaccess.h> 42 #include <asm/io.h> 43 #include <asm/machdep.h> 44 #include <asm/rtas.h> 45 #include <asm/pmc.h> 46 #ifdef CONFIG_PPC32 47 #include <asm/reg.h> 48 #endif 49 #ifdef CONFIG_PMAC_BACKLIGHT 50 #include <asm/backlight.h> 51 #endif 52 #ifdef CONFIG_PPC64 53 #include <asm/firmware.h> 54 #include <asm/processor.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 #include <asm/rio.h> 59 #include <asm/fadump.h> 60 #include <asm/switch_to.h> 61 #include <asm/debug.h> 62 63 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 64 int (*__debugger)(struct pt_regs *regs) __read_mostly; 65 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 66 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 67 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 68 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 69 int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly; 70 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly; 71 72 EXPORT_SYMBOL(__debugger); 73 EXPORT_SYMBOL(__debugger_ipi); 74 EXPORT_SYMBOL(__debugger_bpt); 75 EXPORT_SYMBOL(__debugger_sstep); 76 EXPORT_SYMBOL(__debugger_iabr_match); 77 EXPORT_SYMBOL(__debugger_dabr_match); 78 EXPORT_SYMBOL(__debugger_fault_handler); 79 #endif 80 81 /* 82 * Trap & Exception support 83 */ 84 85 #ifdef CONFIG_PMAC_BACKLIGHT 86 static void pmac_backlight_unblank(void) 87 { 88 mutex_lock(&pmac_backlight_mutex); 89 if (pmac_backlight) { 90 struct backlight_properties *props; 91 92 props = &pmac_backlight->props; 93 props->brightness = props->max_brightness; 94 props->power = FB_BLANK_UNBLANK; 95 backlight_update_status(pmac_backlight); 96 } 97 mutex_unlock(&pmac_backlight_mutex); 98 } 99 #else 100 static inline void pmac_backlight_unblank(void) { } 101 #endif 102 103 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 104 static int die_owner = -1; 105 static unsigned int die_nest_count; 106 static int die_counter; 107 108 static unsigned __kprobes long oops_begin(struct pt_regs *regs) 109 { 110 int cpu; 111 unsigned long flags; 112 113 if (debugger(regs)) 114 return 1; 115 116 oops_enter(); 117 118 /* racy, but better than risking deadlock. */ 119 raw_local_irq_save(flags); 120 cpu = smp_processor_id(); 121 if (!arch_spin_trylock(&die_lock)) { 122 if (cpu == die_owner) 123 /* nested oops. should stop eventually */; 124 else 125 arch_spin_lock(&die_lock); 126 } 127 die_nest_count++; 128 die_owner = cpu; 129 console_verbose(); 130 bust_spinlocks(1); 131 if (machine_is(powermac)) 132 pmac_backlight_unblank(); 133 return flags; 134 } 135 136 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, 137 int signr) 138 { 139 bust_spinlocks(0); 140 die_owner = -1; 141 add_taint(TAINT_DIE); 142 die_nest_count--; 143 oops_exit(); 144 printk("\n"); 145 if (!die_nest_count) 146 /* Nest count reaches zero, release the lock. */ 147 arch_spin_unlock(&die_lock); 148 raw_local_irq_restore(flags); 149 150 crash_fadump(regs, "die oops"); 151 152 /* 153 * A system reset (0x100) is a request to dump, so we always send 154 * it through the crashdump code. 155 */ 156 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) { 157 crash_kexec(regs); 158 159 /* 160 * We aren't the primary crash CPU. We need to send it 161 * to a holding pattern to avoid it ending up in the panic 162 * code. 163 */ 164 crash_kexec_secondary(regs); 165 } 166 167 if (!signr) 168 return; 169 170 /* 171 * While our oops output is serialised by a spinlock, output 172 * from panic() called below can race and corrupt it. If we 173 * know we are going to panic, delay for 1 second so we have a 174 * chance to get clean backtraces from all CPUs that are oopsing. 175 */ 176 if (in_interrupt() || panic_on_oops || !current->pid || 177 is_global_init(current)) { 178 mdelay(MSEC_PER_SEC); 179 } 180 181 if (in_interrupt()) 182 panic("Fatal exception in interrupt"); 183 if (panic_on_oops) 184 panic("Fatal exception"); 185 do_exit(signr); 186 } 187 188 static int __kprobes __die(const char *str, struct pt_regs *regs, long err) 189 { 190 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 191 #ifdef CONFIG_PREEMPT 192 printk("PREEMPT "); 193 #endif 194 #ifdef CONFIG_SMP 195 printk("SMP NR_CPUS=%d ", NR_CPUS); 196 #endif 197 #ifdef CONFIG_DEBUG_PAGEALLOC 198 printk("DEBUG_PAGEALLOC "); 199 #endif 200 #ifdef CONFIG_NUMA 201 printk("NUMA "); 202 #endif 203 printk("%s\n", ppc_md.name ? ppc_md.name : ""); 204 205 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) 206 return 1; 207 208 print_modules(); 209 show_regs(regs); 210 211 return 0; 212 } 213 214 void die(const char *str, struct pt_regs *regs, long err) 215 { 216 unsigned long flags = oops_begin(regs); 217 218 if (__die(str, regs, err)) 219 err = 0; 220 oops_end(flags, regs, err); 221 } 222 223 void user_single_step_siginfo(struct task_struct *tsk, 224 struct pt_regs *regs, siginfo_t *info) 225 { 226 memset(info, 0, sizeof(*info)); 227 info->si_signo = SIGTRAP; 228 info->si_code = TRAP_TRACE; 229 info->si_addr = (void __user *)regs->nip; 230 } 231 232 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 233 { 234 siginfo_t info; 235 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 236 "at %08lx nip %08lx lr %08lx code %x\n"; 237 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \ 238 "at %016lx nip %016lx lr %016lx code %x\n"; 239 240 if (!user_mode(regs)) { 241 die("Exception in kernel mode", regs, signr); 242 return; 243 } 244 245 if (show_unhandled_signals && unhandled_signal(current, signr)) { 246 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 247 current->comm, current->pid, signr, 248 addr, regs->nip, regs->link, code); 249 } 250 251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 252 local_irq_enable(); 253 254 current->thread.trap_nr = code; 255 memset(&info, 0, sizeof(info)); 256 info.si_signo = signr; 257 info.si_code = code; 258 info.si_addr = (void __user *) addr; 259 force_sig_info(signr, &info, current); 260 } 261 262 #ifdef CONFIG_PPC64 263 void system_reset_exception(struct pt_regs *regs) 264 { 265 /* See if any machine dependent calls */ 266 if (ppc_md.system_reset_exception) { 267 if (ppc_md.system_reset_exception(regs)) 268 return; 269 } 270 271 die("System Reset", regs, SIGABRT); 272 273 /* Must die if the interrupt is not recoverable */ 274 if (!(regs->msr & MSR_RI)) 275 panic("Unrecoverable System Reset"); 276 277 /* What should we do here? We could issue a shutdown or hard reset. */ 278 } 279 #endif 280 281 /* 282 * I/O accesses can cause machine checks on powermacs. 283 * Check if the NIP corresponds to the address of a sync 284 * instruction for which there is an entry in the exception 285 * table. 286 * Note that the 601 only takes a machine check on TEA 287 * (transfer error ack) signal assertion, and does not 288 * set any of the top 16 bits of SRR1. 289 * -- paulus. 290 */ 291 static inline int check_io_access(struct pt_regs *regs) 292 { 293 #ifdef CONFIG_PPC32 294 unsigned long msr = regs->msr; 295 const struct exception_table_entry *entry; 296 unsigned int *nip = (unsigned int *)regs->nip; 297 298 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000))) 299 && (entry = search_exception_tables(regs->nip)) != NULL) { 300 /* 301 * Check that it's a sync instruction, or somewhere 302 * in the twi; isync; nop sequence that inb/inw/inl uses. 303 * As the address is in the exception table 304 * we should be able to read the instr there. 305 * For the debug message, we look at the preceding 306 * load or store. 307 */ 308 if (*nip == 0x60000000) /* nop */ 309 nip -= 2; 310 else if (*nip == 0x4c00012c) /* isync */ 311 --nip; 312 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) { 313 /* sync or twi */ 314 unsigned int rb; 315 316 --nip; 317 rb = (*nip >> 11) & 0x1f; 318 printk(KERN_DEBUG "%s bad port %lx at %p\n", 319 (*nip & 0x100)? "OUT to": "IN from", 320 regs->gpr[rb] - _IO_BASE, nip); 321 regs->msr |= MSR_RI; 322 regs->nip = entry->fixup; 323 return 1; 324 } 325 } 326 #endif /* CONFIG_PPC32 */ 327 return 0; 328 } 329 330 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 331 /* On 4xx, the reason for the machine check or program exception 332 is in the ESR. */ 333 #define get_reason(regs) ((regs)->dsisr) 334 #ifndef CONFIG_FSL_BOOKE 335 #define get_mc_reason(regs) ((regs)->dsisr) 336 #else 337 #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) 338 #endif 339 #define REASON_FP ESR_FP 340 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO) 341 #define REASON_PRIVILEGED ESR_PPR 342 #define REASON_TRAP ESR_PTR 343 344 /* single-step stuff */ 345 #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) 346 #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) 347 348 #else 349 /* On non-4xx, the reason for the machine check or program 350 exception is in the MSR. */ 351 #define get_reason(regs) ((regs)->msr) 352 #define get_mc_reason(regs) ((regs)->msr) 353 #define REASON_FP 0x100000 354 #define REASON_ILLEGAL 0x80000 355 #define REASON_PRIVILEGED 0x40000 356 #define REASON_TRAP 0x20000 357 358 #define single_stepping(regs) ((regs)->msr & MSR_SE) 359 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE) 360 #endif 361 362 #if defined(CONFIG_4xx) 363 int machine_check_4xx(struct pt_regs *regs) 364 { 365 unsigned long reason = get_mc_reason(regs); 366 367 if (reason & ESR_IMCP) { 368 printk("Instruction"); 369 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 370 } else 371 printk("Data"); 372 printk(" machine check in kernel mode.\n"); 373 374 return 0; 375 } 376 377 int machine_check_440A(struct pt_regs *regs) 378 { 379 unsigned long reason = get_mc_reason(regs); 380 381 printk("Machine check in kernel mode.\n"); 382 if (reason & ESR_IMCP){ 383 printk("Instruction Synchronous Machine Check exception\n"); 384 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 385 } 386 else { 387 u32 mcsr = mfspr(SPRN_MCSR); 388 if (mcsr & MCSR_IB) 389 printk("Instruction Read PLB Error\n"); 390 if (mcsr & MCSR_DRB) 391 printk("Data Read PLB Error\n"); 392 if (mcsr & MCSR_DWB) 393 printk("Data Write PLB Error\n"); 394 if (mcsr & MCSR_TLBP) 395 printk("TLB Parity Error\n"); 396 if (mcsr & MCSR_ICP){ 397 flush_instruction_cache(); 398 printk("I-Cache Parity Error\n"); 399 } 400 if (mcsr & MCSR_DCSP) 401 printk("D-Cache Search Parity Error\n"); 402 if (mcsr & MCSR_DCFP) 403 printk("D-Cache Flush Parity Error\n"); 404 if (mcsr & MCSR_IMPE) 405 printk("Machine Check exception is imprecise\n"); 406 407 /* Clear MCSR */ 408 mtspr(SPRN_MCSR, mcsr); 409 } 410 return 0; 411 } 412 413 int machine_check_47x(struct pt_regs *regs) 414 { 415 unsigned long reason = get_mc_reason(regs); 416 u32 mcsr; 417 418 printk(KERN_ERR "Machine check in kernel mode.\n"); 419 if (reason & ESR_IMCP) { 420 printk(KERN_ERR 421 "Instruction Synchronous Machine Check exception\n"); 422 mtspr(SPRN_ESR, reason & ~ESR_IMCP); 423 return 0; 424 } 425 mcsr = mfspr(SPRN_MCSR); 426 if (mcsr & MCSR_IB) 427 printk(KERN_ERR "Instruction Read PLB Error\n"); 428 if (mcsr & MCSR_DRB) 429 printk(KERN_ERR "Data Read PLB Error\n"); 430 if (mcsr & MCSR_DWB) 431 printk(KERN_ERR "Data Write PLB Error\n"); 432 if (mcsr & MCSR_TLBP) 433 printk(KERN_ERR "TLB Parity Error\n"); 434 if (mcsr & MCSR_ICP) { 435 flush_instruction_cache(); 436 printk(KERN_ERR "I-Cache Parity Error\n"); 437 } 438 if (mcsr & MCSR_DCSP) 439 printk(KERN_ERR "D-Cache Search Parity Error\n"); 440 if (mcsr & PPC47x_MCSR_GPR) 441 printk(KERN_ERR "GPR Parity Error\n"); 442 if (mcsr & PPC47x_MCSR_FPR) 443 printk(KERN_ERR "FPR Parity Error\n"); 444 if (mcsr & PPC47x_MCSR_IPR) 445 printk(KERN_ERR "Machine Check exception is imprecise\n"); 446 447 /* Clear MCSR */ 448 mtspr(SPRN_MCSR, mcsr); 449 450 return 0; 451 } 452 #elif defined(CONFIG_E500) 453 int machine_check_e500mc(struct pt_regs *regs) 454 { 455 unsigned long mcsr = mfspr(SPRN_MCSR); 456 unsigned long reason = mcsr; 457 int recoverable = 1; 458 459 if (reason & MCSR_LD) { 460 recoverable = fsl_rio_mcheck_exception(regs); 461 if (recoverable == 1) 462 goto silent_out; 463 } 464 465 printk("Machine check in kernel mode.\n"); 466 printk("Caused by (from MCSR=%lx): ", reason); 467 468 if (reason & MCSR_MCP) 469 printk("Machine Check Signal\n"); 470 471 if (reason & MCSR_ICPERR) { 472 printk("Instruction Cache Parity Error\n"); 473 474 /* 475 * This is recoverable by invalidating the i-cache. 476 */ 477 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI); 478 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI) 479 ; 480 481 /* 482 * This will generally be accompanied by an instruction 483 * fetch error report -- only treat MCSR_IF as fatal 484 * if it wasn't due to an L1 parity error. 485 */ 486 reason &= ~MCSR_IF; 487 } 488 489 if (reason & MCSR_DCPERR_MC) { 490 printk("Data Cache Parity Error\n"); 491 492 /* 493 * In write shadow mode we auto-recover from the error, but it 494 * may still get logged and cause a machine check. We should 495 * only treat the non-write shadow case as non-recoverable. 496 */ 497 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) 498 recoverable = 0; 499 } 500 501 if (reason & MCSR_L2MMU_MHIT) { 502 printk("Hit on multiple TLB entries\n"); 503 recoverable = 0; 504 } 505 506 if (reason & MCSR_NMI) 507 printk("Non-maskable interrupt\n"); 508 509 if (reason & MCSR_IF) { 510 printk("Instruction Fetch Error Report\n"); 511 recoverable = 0; 512 } 513 514 if (reason & MCSR_LD) { 515 printk("Load Error Report\n"); 516 recoverable = 0; 517 } 518 519 if (reason & MCSR_ST) { 520 printk("Store Error Report\n"); 521 recoverable = 0; 522 } 523 524 if (reason & MCSR_LDG) { 525 printk("Guarded Load Error Report\n"); 526 recoverable = 0; 527 } 528 529 if (reason & MCSR_TLBSYNC) 530 printk("Simultaneous tlbsync operations\n"); 531 532 if (reason & MCSR_BSL2_ERR) { 533 printk("Level 2 Cache Error\n"); 534 recoverable = 0; 535 } 536 537 if (reason & MCSR_MAV) { 538 u64 addr; 539 540 addr = mfspr(SPRN_MCAR); 541 addr |= (u64)mfspr(SPRN_MCARU) << 32; 542 543 printk("Machine Check %s Address: %#llx\n", 544 reason & MCSR_MEA ? "Effective" : "Physical", addr); 545 } 546 547 silent_out: 548 mtspr(SPRN_MCSR, mcsr); 549 return mfspr(SPRN_MCSR) == 0 && recoverable; 550 } 551 552 int machine_check_e500(struct pt_regs *regs) 553 { 554 unsigned long reason = get_mc_reason(regs); 555 556 if (reason & MCSR_BUS_RBERR) { 557 if (fsl_rio_mcheck_exception(regs)) 558 return 1; 559 } 560 561 printk("Machine check in kernel mode.\n"); 562 printk("Caused by (from MCSR=%lx): ", reason); 563 564 if (reason & MCSR_MCP) 565 printk("Machine Check Signal\n"); 566 if (reason & MCSR_ICPERR) 567 printk("Instruction Cache Parity Error\n"); 568 if (reason & MCSR_DCP_PERR) 569 printk("Data Cache Push Parity Error\n"); 570 if (reason & MCSR_DCPERR) 571 printk("Data Cache Parity Error\n"); 572 if (reason & MCSR_BUS_IAERR) 573 printk("Bus - Instruction Address Error\n"); 574 if (reason & MCSR_BUS_RAERR) 575 printk("Bus - Read Address Error\n"); 576 if (reason & MCSR_BUS_WAERR) 577 printk("Bus - Write Address Error\n"); 578 if (reason & MCSR_BUS_IBERR) 579 printk("Bus - Instruction Data Error\n"); 580 if (reason & MCSR_BUS_RBERR) 581 printk("Bus - Read Data Bus Error\n"); 582 if (reason & MCSR_BUS_WBERR) 583 printk("Bus - Read Data Bus Error\n"); 584 if (reason & MCSR_BUS_IPERR) 585 printk("Bus - Instruction Parity Error\n"); 586 if (reason & MCSR_BUS_RPERR) 587 printk("Bus - Read Parity Error\n"); 588 589 return 0; 590 } 591 592 int machine_check_generic(struct pt_regs *regs) 593 { 594 return 0; 595 } 596 #elif defined(CONFIG_E200) 597 int machine_check_e200(struct pt_regs *regs) 598 { 599 unsigned long reason = get_mc_reason(regs); 600 601 printk("Machine check in kernel mode.\n"); 602 printk("Caused by (from MCSR=%lx): ", reason); 603 604 if (reason & MCSR_MCP) 605 printk("Machine Check Signal\n"); 606 if (reason & MCSR_CP_PERR) 607 printk("Cache Push Parity Error\n"); 608 if (reason & MCSR_CPERR) 609 printk("Cache Parity Error\n"); 610 if (reason & MCSR_EXCP_ERR) 611 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 612 if (reason & MCSR_BUS_IRERR) 613 printk("Bus - Read Bus Error on instruction fetch\n"); 614 if (reason & MCSR_BUS_DRERR) 615 printk("Bus - Read Bus Error on data load\n"); 616 if (reason & MCSR_BUS_WRERR) 617 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 618 619 return 0; 620 } 621 #else 622 int machine_check_generic(struct pt_regs *regs) 623 { 624 unsigned long reason = get_mc_reason(regs); 625 626 printk("Machine check in kernel mode.\n"); 627 printk("Caused by (from SRR1=%lx): ", reason); 628 switch (reason & 0x601F0000) { 629 case 0x80000: 630 printk("Machine check signal\n"); 631 break; 632 case 0: /* for 601 */ 633 case 0x40000: 634 case 0x140000: /* 7450 MSS error and TEA */ 635 printk("Transfer error ack signal\n"); 636 break; 637 case 0x20000: 638 printk("Data parity error signal\n"); 639 break; 640 case 0x10000: 641 printk("Address parity error signal\n"); 642 break; 643 case 0x20000000: 644 printk("L1 Data Cache error\n"); 645 break; 646 case 0x40000000: 647 printk("L1 Instruction Cache error\n"); 648 break; 649 case 0x00100000: 650 printk("L2 data cache parity error\n"); 651 break; 652 default: 653 printk("Unknown values in msr\n"); 654 } 655 return 0; 656 } 657 #endif /* everything else */ 658 659 void machine_check_exception(struct pt_regs *regs) 660 { 661 int recover = 0; 662 663 __get_cpu_var(irq_stat).mce_exceptions++; 664 665 /* See if any machine dependent calls. In theory, we would want 666 * to call the CPU first, and call the ppc_md. one if the CPU 667 * one returns a positive number. However there is existing code 668 * that assumes the board gets a first chance, so let's keep it 669 * that way for now and fix things later. --BenH. 670 */ 671 if (ppc_md.machine_check_exception) 672 recover = ppc_md.machine_check_exception(regs); 673 else if (cur_cpu_spec->machine_check) 674 recover = cur_cpu_spec->machine_check(regs); 675 676 if (recover > 0) 677 return; 678 679 #if defined(CONFIG_8xx) && defined(CONFIG_PCI) 680 /* the qspan pci read routines can cause machine checks -- Cort 681 * 682 * yuck !!! that totally needs to go away ! There are better ways 683 * to deal with that than having a wart in the mcheck handler. 684 * -- BenH 685 */ 686 bad_page_fault(regs, regs->dar, SIGBUS); 687 return; 688 #endif 689 690 if (debugger_fault_handler(regs)) 691 return; 692 693 if (check_io_access(regs)) 694 return; 695 696 die("Machine check", regs, SIGBUS); 697 698 /* Must die if the interrupt is not recoverable */ 699 if (!(regs->msr & MSR_RI)) 700 panic("Unrecoverable Machine check"); 701 } 702 703 void SMIException(struct pt_regs *regs) 704 { 705 die("System Management Interrupt", regs, SIGABRT); 706 } 707 708 void unknown_exception(struct pt_regs *regs) 709 { 710 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 711 regs->nip, regs->msr, regs->trap); 712 713 _exception(SIGTRAP, regs, 0, 0); 714 } 715 716 void instruction_breakpoint_exception(struct pt_regs *regs) 717 { 718 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 719 5, SIGTRAP) == NOTIFY_STOP) 720 return; 721 if (debugger_iabr_match(regs)) 722 return; 723 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 724 } 725 726 void RunModeException(struct pt_regs *regs) 727 { 728 _exception(SIGTRAP, regs, 0, 0); 729 } 730 731 void __kprobes single_step_exception(struct pt_regs *regs) 732 { 733 clear_single_step(regs); 734 735 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 736 5, SIGTRAP) == NOTIFY_STOP) 737 return; 738 if (debugger_sstep(regs)) 739 return; 740 741 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 742 } 743 744 /* 745 * After we have successfully emulated an instruction, we have to 746 * check if the instruction was being single-stepped, and if so, 747 * pretend we got a single-step exception. This was pointed out 748 * by Kumar Gala. -- paulus 749 */ 750 static void emulate_single_step(struct pt_regs *regs) 751 { 752 if (single_stepping(regs)) 753 single_step_exception(regs); 754 } 755 756 static inline int __parse_fpscr(unsigned long fpscr) 757 { 758 int ret = 0; 759 760 /* Invalid operation */ 761 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) 762 ret = FPE_FLTINV; 763 764 /* Overflow */ 765 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) 766 ret = FPE_FLTOVF; 767 768 /* Underflow */ 769 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) 770 ret = FPE_FLTUND; 771 772 /* Divide by zero */ 773 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) 774 ret = FPE_FLTDIV; 775 776 /* Inexact result */ 777 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) 778 ret = FPE_FLTRES; 779 780 return ret; 781 } 782 783 static void parse_fpe(struct pt_regs *regs) 784 { 785 int code = 0; 786 787 flush_fp_to_thread(current); 788 789 code = __parse_fpscr(current->thread.fpscr.val); 790 791 _exception(SIGFPE, regs, code, regs->nip); 792 } 793 794 /* 795 * Illegal instruction emulation support. Originally written to 796 * provide the PVR to user applications using the mfspr rd, PVR. 797 * Return non-zero if we can't emulate, or -EFAULT if the associated 798 * memory access caused an access fault. Return zero on success. 799 * 800 * There are a couple of ways to do this, either "decode" the instruction 801 * or directly match lots of bits. In this case, matching lots of 802 * bits is faster and easier. 803 * 804 */ 805 static int emulate_string_inst(struct pt_regs *regs, u32 instword) 806 { 807 u8 rT = (instword >> 21) & 0x1f; 808 u8 rA = (instword >> 16) & 0x1f; 809 u8 NB_RB = (instword >> 11) & 0x1f; 810 u32 num_bytes; 811 unsigned long EA; 812 int pos = 0; 813 814 /* Early out if we are an invalid form of lswx */ 815 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX) 816 if ((rT == rA) || (rT == NB_RB)) 817 return -EINVAL; 818 819 EA = (rA == 0) ? 0 : regs->gpr[rA]; 820 821 switch (instword & PPC_INST_STRING_MASK) { 822 case PPC_INST_LSWX: 823 case PPC_INST_STSWX: 824 EA += NB_RB; 825 num_bytes = regs->xer & 0x7f; 826 break; 827 case PPC_INST_LSWI: 828 case PPC_INST_STSWI: 829 num_bytes = (NB_RB == 0) ? 32 : NB_RB; 830 break; 831 default: 832 return -EINVAL; 833 } 834 835 while (num_bytes != 0) 836 { 837 u8 val; 838 u32 shift = 8 * (3 - (pos & 0x3)); 839 840 switch ((instword & PPC_INST_STRING_MASK)) { 841 case PPC_INST_LSWX: 842 case PPC_INST_LSWI: 843 if (get_user(val, (u8 __user *)EA)) 844 return -EFAULT; 845 /* first time updating this reg, 846 * zero it out */ 847 if (pos == 0) 848 regs->gpr[rT] = 0; 849 regs->gpr[rT] |= val << shift; 850 break; 851 case PPC_INST_STSWI: 852 case PPC_INST_STSWX: 853 val = regs->gpr[rT] >> shift; 854 if (put_user(val, (u8 __user *)EA)) 855 return -EFAULT; 856 break; 857 } 858 /* move EA to next address */ 859 EA += 1; 860 num_bytes--; 861 862 /* manage our position within the register */ 863 if (++pos == 4) { 864 pos = 0; 865 if (++rT == 32) 866 rT = 0; 867 } 868 } 869 870 return 0; 871 } 872 873 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) 874 { 875 u32 ra,rs; 876 unsigned long tmp; 877 878 ra = (instword >> 16) & 0x1f; 879 rs = (instword >> 21) & 0x1f; 880 881 tmp = regs->gpr[rs]; 882 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); 883 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); 884 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 885 regs->gpr[ra] = tmp; 886 887 return 0; 888 } 889 890 static int emulate_isel(struct pt_regs *regs, u32 instword) 891 { 892 u8 rT = (instword >> 21) & 0x1f; 893 u8 rA = (instword >> 16) & 0x1f; 894 u8 rB = (instword >> 11) & 0x1f; 895 u8 BC = (instword >> 6) & 0x1f; 896 u8 bit; 897 unsigned long tmp; 898 899 tmp = (rA == 0) ? 0 : regs->gpr[rA]; 900 bit = (regs->ccr >> (31 - BC)) & 0x1; 901 902 regs->gpr[rT] = bit ? tmp : regs->gpr[rB]; 903 904 return 0; 905 } 906 907 static int emulate_instruction(struct pt_regs *regs) 908 { 909 u32 instword; 910 u32 rd; 911 912 if (!user_mode(regs) || (regs->msr & MSR_LE)) 913 return -EINVAL; 914 CHECK_FULL_REGS(regs); 915 916 if (get_user(instword, (u32 __user *)(regs->nip))) 917 return -EFAULT; 918 919 /* Emulate the mfspr rD, PVR. */ 920 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { 921 PPC_WARN_EMULATED(mfpvr, regs); 922 rd = (instword >> 21) & 0x1f; 923 regs->gpr[rd] = mfspr(SPRN_PVR); 924 return 0; 925 } 926 927 /* Emulating the dcba insn is just a no-op. */ 928 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { 929 PPC_WARN_EMULATED(dcba, regs); 930 return 0; 931 } 932 933 /* Emulate the mcrxr insn. */ 934 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { 935 int shift = (instword >> 21) & 0x1c; 936 unsigned long msk = 0xf0000000UL >> shift; 937 938 PPC_WARN_EMULATED(mcrxr, regs); 939 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); 940 regs->xer &= ~0xf0000000UL; 941 return 0; 942 } 943 944 /* Emulate load/store string insn. */ 945 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { 946 PPC_WARN_EMULATED(string, regs); 947 return emulate_string_inst(regs, instword); 948 } 949 950 /* Emulate the popcntb (Population Count Bytes) instruction. */ 951 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { 952 PPC_WARN_EMULATED(popcntb, regs); 953 return emulate_popcntb_inst(regs, instword); 954 } 955 956 /* Emulate isel (Integer Select) instruction */ 957 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { 958 PPC_WARN_EMULATED(isel, regs); 959 return emulate_isel(regs, instword); 960 } 961 962 #ifdef CONFIG_PPC64 963 /* Emulate the mfspr rD, DSCR. */ 964 if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && 965 cpu_has_feature(CPU_FTR_DSCR)) { 966 PPC_WARN_EMULATED(mfdscr, regs); 967 rd = (instword >> 21) & 0x1f; 968 regs->gpr[rd] = mfspr(SPRN_DSCR); 969 return 0; 970 } 971 /* Emulate the mtspr DSCR, rD. */ 972 if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && 973 cpu_has_feature(CPU_FTR_DSCR)) { 974 PPC_WARN_EMULATED(mtdscr, regs); 975 rd = (instword >> 21) & 0x1f; 976 current->thread.dscr = regs->gpr[rd]; 977 current->thread.dscr_inherit = 1; 978 mtspr(SPRN_DSCR, current->thread.dscr); 979 return 0; 980 } 981 #endif 982 983 return -EINVAL; 984 } 985 986 int is_valid_bugaddr(unsigned long addr) 987 { 988 return is_kernel_addr(addr); 989 } 990 991 void __kprobes program_check_exception(struct pt_regs *regs) 992 { 993 unsigned int reason = get_reason(regs); 994 extern int do_mathemu(struct pt_regs *regs); 995 996 /* We can now get here via a FP Unavailable exception if the core 997 * has no FPU, in that case the reason flags will be 0 */ 998 999 if (reason & REASON_FP) { 1000 /* IEEE FP exception */ 1001 parse_fpe(regs); 1002 return; 1003 } 1004 if (reason & REASON_TRAP) { 1005 /* Debugger is first in line to stop recursive faults in 1006 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1007 if (debugger_bpt(regs)) 1008 return; 1009 1010 /* trap exception */ 1011 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1012 == NOTIFY_STOP) 1013 return; 1014 1015 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1016 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1017 regs->nip += 4; 1018 return; 1019 } 1020 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1021 return; 1022 } 1023 1024 /* We restore the interrupt state now */ 1025 if (!arch_irq_disabled_regs(regs)) 1026 local_irq_enable(); 1027 1028 #ifdef CONFIG_MATH_EMULATION 1029 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1030 * but there seems to be a hardware bug on the 405GP (RevD) 1031 * that means ESR is sometimes set incorrectly - either to 1032 * ESR_DST (!?) or 0. In the process of chasing this with the 1033 * hardware people - not sure if it can happen on any illegal 1034 * instruction or only on FP instructions, whether there is a 1035 * pattern to occurrences etc. -dgibson 31/Mar/2003 */ 1036 switch (do_mathemu(regs)) { 1037 case 0: 1038 emulate_single_step(regs); 1039 return; 1040 case 1: { 1041 int code = 0; 1042 code = __parse_fpscr(current->thread.fpscr.val); 1043 _exception(SIGFPE, regs, code, regs->nip); 1044 return; 1045 } 1046 case -EFAULT: 1047 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1048 return; 1049 } 1050 /* fall through on any other errors */ 1051 #endif /* CONFIG_MATH_EMULATION */ 1052 1053 /* Try to emulate it if we should. */ 1054 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1055 switch (emulate_instruction(regs)) { 1056 case 0: 1057 regs->nip += 4; 1058 emulate_single_step(regs); 1059 return; 1060 case -EFAULT: 1061 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1062 return; 1063 } 1064 } 1065 1066 if (reason & REASON_PRIVILEGED) 1067 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1068 else 1069 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1070 } 1071 1072 void alignment_exception(struct pt_regs *regs) 1073 { 1074 int sig, code, fixed = 0; 1075 1076 /* We restore the interrupt state now */ 1077 if (!arch_irq_disabled_regs(regs)) 1078 local_irq_enable(); 1079 1080 /* we don't implement logging of alignment exceptions */ 1081 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1082 fixed = fix_alignment(regs); 1083 1084 if (fixed == 1) { 1085 regs->nip += 4; /* skip over emulated instruction */ 1086 emulate_single_step(regs); 1087 return; 1088 } 1089 1090 /* Operand address was bad */ 1091 if (fixed == -EFAULT) { 1092 sig = SIGSEGV; 1093 code = SEGV_ACCERR; 1094 } else { 1095 sig = SIGBUS; 1096 code = BUS_ADRALN; 1097 } 1098 if (user_mode(regs)) 1099 _exception(sig, regs, code, regs->dar); 1100 else 1101 bad_page_fault(regs, regs->dar, sig); 1102 } 1103 1104 void StackOverflow(struct pt_regs *regs) 1105 { 1106 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1107 current, regs->gpr[1]); 1108 debugger(regs); 1109 show_regs(regs); 1110 panic("kernel stack overflow"); 1111 } 1112 1113 void nonrecoverable_exception(struct pt_regs *regs) 1114 { 1115 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 1116 regs->nip, regs->msr); 1117 debugger(regs); 1118 die("nonrecoverable exception", regs, SIGKILL); 1119 } 1120 1121 void trace_syscall(struct pt_regs *regs) 1122 { 1123 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1124 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1125 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1126 } 1127 1128 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1129 { 1130 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1131 "%lx at %lx\n", regs->trap, regs->nip); 1132 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1133 } 1134 1135 void altivec_unavailable_exception(struct pt_regs *regs) 1136 { 1137 if (user_mode(regs)) { 1138 /* A user program has executed an altivec instruction, 1139 but this kernel doesn't support altivec. */ 1140 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1141 return; 1142 } 1143 1144 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1145 "%lx at %lx\n", regs->trap, regs->nip); 1146 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1147 } 1148 1149 void vsx_unavailable_exception(struct pt_regs *regs) 1150 { 1151 if (user_mode(regs)) { 1152 /* A user program has executed an vsx instruction, 1153 but this kernel doesn't support vsx. */ 1154 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1155 return; 1156 } 1157 1158 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1159 "%lx at %lx\n", regs->trap, regs->nip); 1160 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1161 } 1162 1163 void performance_monitor_exception(struct pt_regs *regs) 1164 { 1165 __get_cpu_var(irq_stat).pmu_irqs++; 1166 1167 perf_irq(regs); 1168 } 1169 1170 #ifdef CONFIG_8xx 1171 void SoftwareEmulation(struct pt_regs *regs) 1172 { 1173 extern int do_mathemu(struct pt_regs *); 1174 extern int Soft_emulate_8xx(struct pt_regs *); 1175 #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) 1176 int errcode; 1177 #endif 1178 1179 CHECK_FULL_REGS(regs); 1180 1181 if (!user_mode(regs)) { 1182 debugger(regs); 1183 die("Kernel Mode Software FPU Emulation", regs, SIGFPE); 1184 } 1185 1186 #ifdef CONFIG_MATH_EMULATION 1187 errcode = do_mathemu(regs); 1188 if (errcode >= 0) 1189 PPC_WARN_EMULATED(math, regs); 1190 1191 switch (errcode) { 1192 case 0: 1193 emulate_single_step(regs); 1194 return; 1195 case 1: { 1196 int code = 0; 1197 code = __parse_fpscr(current->thread.fpscr.val); 1198 _exception(SIGFPE, regs, code, regs->nip); 1199 return; 1200 } 1201 case -EFAULT: 1202 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1203 return; 1204 default: 1205 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1206 return; 1207 } 1208 1209 #elif defined(CONFIG_8XX_MINIMAL_FPEMU) 1210 errcode = Soft_emulate_8xx(regs); 1211 if (errcode >= 0) 1212 PPC_WARN_EMULATED(8xx, regs); 1213 1214 switch (errcode) { 1215 case 0: 1216 emulate_single_step(regs); 1217 return; 1218 case 1: 1219 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1220 return; 1221 case -EFAULT: 1222 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1223 return; 1224 } 1225 #else 1226 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1227 #endif 1228 } 1229 #endif /* CONFIG_8xx */ 1230 1231 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1232 static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 1233 { 1234 int changed = 0; 1235 /* 1236 * Determine the cause of the debug event, clear the 1237 * event flags and send a trap to the handler. Torez 1238 */ 1239 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { 1240 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); 1241 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE 1242 current->thread.dbcr2 &= ~DBCR2_DAC12MODE; 1243 #endif 1244 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 1245 5); 1246 changed |= 0x01; 1247 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) { 1248 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); 1249 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT, 1250 6); 1251 changed |= 0x01; 1252 } else if (debug_status & DBSR_IAC1) { 1253 current->thread.dbcr0 &= ~DBCR0_IAC1; 1254 dbcr_iac_range(current) &= ~DBCR_IAC12MODE; 1255 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1256 1); 1257 changed |= 0x01; 1258 } else if (debug_status & DBSR_IAC2) { 1259 current->thread.dbcr0 &= ~DBCR0_IAC2; 1260 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 1261 2); 1262 changed |= 0x01; 1263 } else if (debug_status & DBSR_IAC3) { 1264 current->thread.dbcr0 &= ~DBCR0_IAC3; 1265 dbcr_iac_range(current) &= ~DBCR_IAC34MODE; 1266 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 1267 3); 1268 changed |= 0x01; 1269 } else if (debug_status & DBSR_IAC4) { 1270 current->thread.dbcr0 &= ~DBCR0_IAC4; 1271 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 1272 4); 1273 changed |= 0x01; 1274 } 1275 /* 1276 * At the point this routine was called, the MSR(DE) was turned off. 1277 * Check all other debug flags and see if that bit needs to be turned 1278 * back on or not. 1279 */ 1280 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) 1281 regs->msr |= MSR_DE; 1282 else 1283 /* Make sure the IDM flag is off */ 1284 current->thread.dbcr0 &= ~DBCR0_IDM; 1285 1286 if (changed & 0x01) 1287 mtspr(SPRN_DBCR0, current->thread.dbcr0); 1288 } 1289 1290 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) 1291 { 1292 current->thread.dbsr = debug_status; 1293 1294 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while 1295 * on server, it stops on the target of the branch. In order to simulate 1296 * the server behaviour, we thus restart right away with a single step 1297 * instead of stopping here when hitting a BT 1298 */ 1299 if (debug_status & DBSR_BT) { 1300 regs->msr &= ~MSR_DE; 1301 1302 /* Disable BT */ 1303 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); 1304 /* Clear the BT event */ 1305 mtspr(SPRN_DBSR, DBSR_BT); 1306 1307 /* Do the single step trick only when coming from userspace */ 1308 if (user_mode(regs)) { 1309 current->thread.dbcr0 &= ~DBCR0_BT; 1310 current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; 1311 regs->msr |= MSR_DE; 1312 return; 1313 } 1314 1315 if (notify_die(DIE_SSTEP, "block_step", regs, 5, 1316 5, SIGTRAP) == NOTIFY_STOP) { 1317 return; 1318 } 1319 if (debugger_sstep(regs)) 1320 return; 1321 } else if (debug_status & DBSR_IC) { /* Instruction complete */ 1322 regs->msr &= ~MSR_DE; 1323 1324 /* Disable instruction completion */ 1325 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC); 1326 /* Clear the instruction completion event */ 1327 mtspr(SPRN_DBSR, DBSR_IC); 1328 1329 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 1330 5, SIGTRAP) == NOTIFY_STOP) { 1331 return; 1332 } 1333 1334 if (debugger_sstep(regs)) 1335 return; 1336 1337 if (user_mode(regs)) { 1338 current->thread.dbcr0 &= ~DBCR0_IC; 1339 if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, 1340 current->thread.dbcr1)) 1341 regs->msr |= MSR_DE; 1342 else 1343 /* Make sure the IDM bit is off */ 1344 current->thread.dbcr0 &= ~DBCR0_IDM; 1345 } 1346 1347 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 1348 } else 1349 handle_debug(regs, debug_status); 1350 } 1351 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 1352 1353 #if !defined(CONFIG_TAU_INT) 1354 void TAUException(struct pt_regs *regs) 1355 { 1356 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 1357 regs->nip, regs->msr, regs->trap, print_tainted()); 1358 } 1359 #endif /* CONFIG_INT_TAU */ 1360 1361 #ifdef CONFIG_ALTIVEC 1362 void altivec_assist_exception(struct pt_regs *regs) 1363 { 1364 int err; 1365 1366 if (!user_mode(regs)) { 1367 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode" 1368 " at %lx\n", regs->nip); 1369 die("Kernel VMX/Altivec assist exception", regs, SIGILL); 1370 } 1371 1372 flush_altivec_to_thread(current); 1373 1374 PPC_WARN_EMULATED(altivec, regs); 1375 err = emulate_altivec(regs); 1376 if (err == 0) { 1377 regs->nip += 4; /* skip emulated instruction */ 1378 emulate_single_step(regs); 1379 return; 1380 } 1381 1382 if (err == -EFAULT) { 1383 /* got an error reading the instruction */ 1384 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1385 } else { 1386 /* didn't recognize the instruction */ 1387 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1388 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " 1389 "in %s at %lx\n", current->comm, regs->nip); 1390 current->thread.vscr.u[3] |= 0x10000; 1391 } 1392 } 1393 #endif /* CONFIG_ALTIVEC */ 1394 1395 #ifdef CONFIG_VSX 1396 void vsx_assist_exception(struct pt_regs *regs) 1397 { 1398 if (!user_mode(regs)) { 1399 printk(KERN_EMERG "VSX assist exception in kernel mode" 1400 " at %lx\n", regs->nip); 1401 die("Kernel VSX assist exception", regs, SIGILL); 1402 } 1403 1404 flush_vsx_to_thread(current); 1405 printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip); 1406 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1407 } 1408 #endif /* CONFIG_VSX */ 1409 1410 #ifdef CONFIG_FSL_BOOKE 1411 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1412 unsigned long error_code) 1413 { 1414 /* We treat cache locking instructions from the user 1415 * as priv ops, in the future we could try to do 1416 * something smarter 1417 */ 1418 if (error_code & (ESR_DLK|ESR_ILK)) 1419 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1420 return; 1421 } 1422 #endif /* CONFIG_FSL_BOOKE */ 1423 1424 #ifdef CONFIG_SPE 1425 void SPEFloatingPointException(struct pt_regs *regs) 1426 { 1427 extern int do_spe_mathemu(struct pt_regs *regs); 1428 unsigned long spefscr; 1429 int fpexc_mode; 1430 int code = 0; 1431 int err; 1432 1433 flush_spe_to_thread(current); 1434 1435 spefscr = current->thread.spefscr; 1436 fpexc_mode = current->thread.fpexc_mode; 1437 1438 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1439 code = FPE_FLTOVF; 1440 } 1441 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1442 code = FPE_FLTUND; 1443 } 1444 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1445 code = FPE_FLTDIV; 1446 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1447 code = FPE_FLTINV; 1448 } 1449 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1450 code = FPE_FLTRES; 1451 1452 err = do_spe_mathemu(regs); 1453 if (err == 0) { 1454 regs->nip += 4; /* skip emulated instruction */ 1455 emulate_single_step(regs); 1456 return; 1457 } 1458 1459 if (err == -EFAULT) { 1460 /* got an error reading the instruction */ 1461 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1462 } else if (err == -EINVAL) { 1463 /* didn't recognize the instruction */ 1464 printk(KERN_ERR "unrecognized spe instruction " 1465 "in %s at %lx\n", current->comm, regs->nip); 1466 } else { 1467 _exception(SIGFPE, regs, code, regs->nip); 1468 } 1469 1470 return; 1471 } 1472 1473 void SPEFloatingPointRoundException(struct pt_regs *regs) 1474 { 1475 extern int speround_handler(struct pt_regs *regs); 1476 int err; 1477 1478 preempt_disable(); 1479 if (regs->msr & MSR_SPE) 1480 giveup_spe(current); 1481 preempt_enable(); 1482 1483 regs->nip -= 4; 1484 err = speround_handler(regs); 1485 if (err == 0) { 1486 regs->nip += 4; /* skip emulated instruction */ 1487 emulate_single_step(regs); 1488 return; 1489 } 1490 1491 if (err == -EFAULT) { 1492 /* got an error reading the instruction */ 1493 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1494 } else if (err == -EINVAL) { 1495 /* didn't recognize the instruction */ 1496 printk(KERN_ERR "unrecognized spe instruction " 1497 "in %s at %lx\n", current->comm, regs->nip); 1498 } else { 1499 _exception(SIGFPE, regs, 0, regs->nip); 1500 return; 1501 } 1502 } 1503 #endif 1504 1505 /* 1506 * We enter here if we get an unrecoverable exception, that is, one 1507 * that happened at a point where the RI (recoverable interrupt) bit 1508 * in the MSR is 0. This indicates that SRR0/1 are live, and that 1509 * we therefore lost state by taking this exception. 1510 */ 1511 void unrecoverable_exception(struct pt_regs *regs) 1512 { 1513 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", 1514 regs->trap, regs->nip); 1515 die("Unrecoverable exception", regs, SIGABRT); 1516 } 1517 1518 #ifdef CONFIG_BOOKE_WDT 1519 /* 1520 * Default handler for a Watchdog exception, 1521 * spins until a reboot occurs 1522 */ 1523 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) 1524 { 1525 /* Generic WatchdogHandler, implement your own */ 1526 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); 1527 return; 1528 } 1529 1530 void WatchdogException(struct pt_regs *regs) 1531 { 1532 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 1533 WatchdogHandler(regs); 1534 } 1535 #endif 1536 1537 /* 1538 * We enter here if we discover during exception entry that we are 1539 * running in supervisor mode with a userspace value in the stack pointer. 1540 */ 1541 void kernel_bad_stack(struct pt_regs *regs) 1542 { 1543 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 1544 regs->gpr[1], regs->nip); 1545 die("Bad kernel stack pointer", regs, SIGABRT); 1546 } 1547 1548 void __init trap_init(void) 1549 { 1550 } 1551 1552 1553 #ifdef CONFIG_PPC_EMULATED_STATS 1554 1555 #define WARN_EMULATED_SETUP(type) .type = { .name = #type } 1556 1557 struct ppc_emulated ppc_emulated = { 1558 #ifdef CONFIG_ALTIVEC 1559 WARN_EMULATED_SETUP(altivec), 1560 #endif 1561 WARN_EMULATED_SETUP(dcba), 1562 WARN_EMULATED_SETUP(dcbz), 1563 WARN_EMULATED_SETUP(fp_pair), 1564 WARN_EMULATED_SETUP(isel), 1565 WARN_EMULATED_SETUP(mcrxr), 1566 WARN_EMULATED_SETUP(mfpvr), 1567 WARN_EMULATED_SETUP(multiple), 1568 WARN_EMULATED_SETUP(popcntb), 1569 WARN_EMULATED_SETUP(spe), 1570 WARN_EMULATED_SETUP(string), 1571 WARN_EMULATED_SETUP(unaligned), 1572 #ifdef CONFIG_MATH_EMULATION 1573 WARN_EMULATED_SETUP(math), 1574 #elif defined(CONFIG_8XX_MINIMAL_FPEMU) 1575 WARN_EMULATED_SETUP(8xx), 1576 #endif 1577 #ifdef CONFIG_VSX 1578 WARN_EMULATED_SETUP(vsx), 1579 #endif 1580 #ifdef CONFIG_PPC64 1581 WARN_EMULATED_SETUP(mfdscr), 1582 WARN_EMULATED_SETUP(mtdscr), 1583 #endif 1584 }; 1585 1586 u32 ppc_warn_emulated; 1587 1588 void ppc_warn_emulated_print(const char *type) 1589 { 1590 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, 1591 type); 1592 } 1593 1594 static int __init ppc_warn_emulated_init(void) 1595 { 1596 struct dentry *dir, *d; 1597 unsigned int i; 1598 struct ppc_emulated_entry *entries = (void *)&ppc_emulated; 1599 1600 if (!powerpc_debugfs_root) 1601 return -ENODEV; 1602 1603 dir = debugfs_create_dir("emulated_instructions", 1604 powerpc_debugfs_root); 1605 if (!dir) 1606 return -ENOMEM; 1607 1608 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, 1609 &ppc_warn_emulated); 1610 if (!d) 1611 goto fail; 1612 1613 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { 1614 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, 1615 (u32 *)&entries[i].val.counter); 1616 if (!d) 1617 goto fail; 1618 } 1619 1620 return 0; 1621 1622 fail: 1623 debugfs_remove_recursive(dir); 1624 return -ENOMEM; 1625 } 1626 1627 device_initcall(ppc_warn_emulated_init); 1628 1629 #endif /* CONFIG_PPC_EMULATED_STATS */ 1630