1 /* 2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright 2007-2010 Freescale Semiconductor, Inc. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; either version 8 * 2 of the License, or (at your option) any later version. 9 * 10 * Modified by Cort Dougan (cort@cs.nmt.edu) 11 * and Paul Mackerras (paulus@samba.org) 12 */ 13 14 /* 15 * This file handles the architecture-dependent parts of hardware exceptions 16 */ 17 18 #include <linux/errno.h> 19 #include <linux/sched.h> 20 #include <linux/kernel.h> 21 #include <linux/mm.h> 22 #include <linux/stddef.h> 23 #include <linux/unistd.h> 24 #include <linux/ptrace.h> 25 #include <linux/user.h> 26 #include <linux/interrupt.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/prctl.h> 30 #include <linux/delay.h> 31 #include <linux/kprobes.h> 32 #include <linux/kexec.h> 33 #include <linux/backlight.h> 34 #include <linux/bug.h> 35 #include <linux/kdebug.h> 36 #include <linux/debugfs.h> 37 38 #include <asm/emulated_ops.h> 39 #include <asm/pgtable.h> 40 #include <asm/uaccess.h> 41 #include <asm/system.h> 42 #include <asm/io.h> 43 #include <asm/machdep.h> 44 #include <asm/rtas.h> 45 #include <asm/pmc.h> 46 #ifdef CONFIG_PPC32 47 #include <asm/reg.h> 48 #endif 49 #ifdef CONFIG_PMAC_BACKLIGHT 50 #include <asm/backlight.h> 51 #endif 52 #ifdef CONFIG_PPC64 53 #include <asm/firmware.h> 54 #include <asm/processor.h> 55 #endif 56 #include <asm/kexec.h> 57 #include <asm/ppc-opcode.h> 58 59 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 60 int (*__debugger)(struct pt_regs *regs) __read_mostly; 61 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly; 62 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly; 63 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly; 64 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly; 65 int 
(*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
/*
 * Force the panel backlight to full brightness so the oops output is
 * readable on a PowerBook whose display was blanked when we crashed.
 */
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * Terminal oops path: print the oops banner and register state, give an
 * attached debugger and kdump a chance to take over, then kill the
 * current task (or panic if we cannot safely return).
 *
 * Returns 1 if a debugger consumed the event; otherwise does not return
 * to the faulting context (do_exit/panic).
 *
 * The static 'die' lock serialises oops output between CPUs.  The
 * owner/depth fields let the same CPU re-enter (a recursive oops while
 * printing) without self-deadlocking, and suppress output beyond a
 * nesting depth of 2.
 */
int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		raw_spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =	__RAW_SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		/* First entry on this CPU: take the global oops lock. */
		console_verbose();
		raw_spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		/* Recursive oops on this CPU: we already hold the lock. */
		local_save_flags(flags);
	}

	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
		printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
		printk("NUMA ");
#endif
		printk("%s\n", ppc_md.name ? ppc_md.name : "");

		sysfs_printk_last_file();
		if (notify_die(DIE_OOPS, str, regs, err, 255,
			       SIGSEGV) == NOTIFY_STOP)
			return 1;

		print_modules();
		show_regs(regs);
	} else {
		printk("Recursive die() failure, output suppressed\n");
	}

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	raw_spin_unlock_irqrestore(&die.lock, flags);

	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);

	return 0;
}

/*
 * Fill in a SIGTRAP/TRAP_TRACE siginfo describing a single-step event
 * at the task's current NIP.
 */
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

/*
 * Deliver signal @signr (with si_code @code and si_addr @addr) for the
 * fault described by @regs.  A fault taken in kernel mode oopses via
 * die() instead; unhandled user signals get a rate-limited diagnostic.
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	} else if (show_unhandled_signals &&
		   unhandled_signal(current, signr) &&
		   printk_ratelimit()) {
		/* MSR_SF (64-bit mode) selects the wide message format */
		printk(regs->msr & MSR_SF ? fmt64 : fmt32,
			current->comm, current->pid, signr,
			addr, regs->nip, regs->link, code);
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* Mark recoverable and resume at the fixup stub. */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
/* Report a 4xx machine check; returns 0 (not recovered). */
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

/* Decode and report a 440A machine check from ESR/MCSR; returns 0. */
int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			/* I-cache parity: invalidate before continuing */
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

/* Decode and report a 476 (47x) machine check; returns 0. */
int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		/* I-cache parity: invalidate before continuing */
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
/*
 * Decode and report an e500mc machine check from MCSR.  Returns non-zero
 * only if the event was fully recovered (MCSR cleared and no fatal cause).
 */
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ?
"Effective" : "Physical", addr); 501 } 502 503 mtspr(SPRN_MCSR, mcsr); 504 return mfspr(SPRN_MCSR) == 0 && recoverable; 505 } 506 507 int machine_check_e500(struct pt_regs *regs) 508 { 509 unsigned long reason = get_mc_reason(regs); 510 511 printk("Machine check in kernel mode.\n"); 512 printk("Caused by (from MCSR=%lx): ", reason); 513 514 if (reason & MCSR_MCP) 515 printk("Machine Check Signal\n"); 516 if (reason & MCSR_ICPERR) 517 printk("Instruction Cache Parity Error\n"); 518 if (reason & MCSR_DCP_PERR) 519 printk("Data Cache Push Parity Error\n"); 520 if (reason & MCSR_DCPERR) 521 printk("Data Cache Parity Error\n"); 522 if (reason & MCSR_BUS_IAERR) 523 printk("Bus - Instruction Address Error\n"); 524 if (reason & MCSR_BUS_RAERR) 525 printk("Bus - Read Address Error\n"); 526 if (reason & MCSR_BUS_WAERR) 527 printk("Bus - Write Address Error\n"); 528 if (reason & MCSR_BUS_IBERR) 529 printk("Bus - Instruction Data Error\n"); 530 if (reason & MCSR_BUS_RBERR) 531 printk("Bus - Read Data Bus Error\n"); 532 if (reason & MCSR_BUS_WBERR) 533 printk("Bus - Read Data Bus Error\n"); 534 if (reason & MCSR_BUS_IPERR) 535 printk("Bus - Instruction Parity Error\n"); 536 if (reason & MCSR_BUS_RPERR) 537 printk("Bus - Read Parity Error\n"); 538 539 return 0; 540 } 541 #elif defined(CONFIG_E200) 542 int machine_check_e200(struct pt_regs *regs) 543 { 544 unsigned long reason = get_mc_reason(regs); 545 546 printk("Machine check in kernel mode.\n"); 547 printk("Caused by (from MCSR=%lx): ", reason); 548 549 if (reason & MCSR_MCP) 550 printk("Machine Check Signal\n"); 551 if (reason & MCSR_CP_PERR) 552 printk("Cache Push Parity Error\n"); 553 if (reason & MCSR_CPERR) 554 printk("Cache Parity Error\n"); 555 if (reason & MCSR_EXCP_ERR) 556 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); 557 if (reason & MCSR_BUS_IRERR) 558 printk("Bus - Read Bus Error on instruction fetch\n"); 559 if (reason & MCSR_BUS_DRERR) 560 printk("Bus - Read Bus Error 
on data load\n"); 561 if (reason & MCSR_BUS_WRERR) 562 printk("Bus - Write Bus Error on buffered store or cache line push\n"); 563 564 return 0; 565 } 566 #else 567 int machine_check_generic(struct pt_regs *regs) 568 { 569 unsigned long reason = get_mc_reason(regs); 570 571 printk("Machine check in kernel mode.\n"); 572 printk("Caused by (from SRR1=%lx): ", reason); 573 switch (reason & 0x601F0000) { 574 case 0x80000: 575 printk("Machine check signal\n"); 576 break; 577 case 0: /* for 601 */ 578 case 0x40000: 579 case 0x140000: /* 7450 MSS error and TEA */ 580 printk("Transfer error ack signal\n"); 581 break; 582 case 0x20000: 583 printk("Data parity error signal\n"); 584 break; 585 case 0x10000: 586 printk("Address parity error signal\n"); 587 break; 588 case 0x20000000: 589 printk("L1 Data Cache error\n"); 590 break; 591 case 0x40000000: 592 printk("L1 Instruction Cache error\n"); 593 break; 594 case 0x00100000: 595 printk("L2 data cache parity error\n"); 596 break; 597 default: 598 printk("Unknown values in msr\n"); 599 } 600 return 0; 601 } 602 #endif /* everything else */ 603 604 void machine_check_exception(struct pt_regs *regs) 605 { 606 int recover = 0; 607 608 __get_cpu_var(irq_stat).mce_exceptions++; 609 610 /* See if any machine dependent calls. In theory, we would want 611 * to call the CPU first, and call the ppc_md. one if the CPU 612 * one returns a positive number. However there is existing code 613 * that assumes the board gets a first chance, so let's keep it 614 * that way for now and fix things later. --BenH. 
 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		return;

	if (user_mode(regs)) {
		/* Recovered from the task's point of view: mark the
		 * exception recoverable and deliver SIGBUS. */
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

/*
 * Map enabled-and-raised FPSCR exception bits to an FPE_* si_code.
 * Returns 0 if no enabled exception is pending.
 */
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

/* Deliver SIGFPE for an IEEE FP exception, decoding FPSCR for si_code. */
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			/* X-forms: EA += rB, length from XER[25:31] */
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			/* Immediate forms: NB == 0 means 32 bytes */
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

/*
 * Software popcntb: per-byte population count of GPR rs into GPR ra,
 * using the usual parallel bit-counting reduction.  Always returns 0.
 */
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

/*
 * Software isel: rT = CR[BC] ? (rA == 0 ? 0 : rA) : rB.  Always returns 0.
 */
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

/*
 * Try to emulate the faulting instruction at regs->nip for a user task.
 * Returns 0 on success (caller advances NIP), -EFAULT on a user memory
 * fault, -EINVAL if the instruction is not one we emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* Only user-mode, big-endian contexts are handled here */
	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			return;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			/* Kernel WARN_ON(): skip the trap and continue */
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc.
-dgibson 31/Mar/2003 */ 968 switch (do_mathemu(regs)) { 969 case 0: 970 emulate_single_step(regs); 971 return; 972 case 1: { 973 int code = 0; 974 code = __parse_fpscr(current->thread.fpscr.val); 975 _exception(SIGFPE, regs, code, regs->nip); 976 return; 977 } 978 case -EFAULT: 979 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 980 return; 981 } 982 /* fall through on any other errors */ 983 #endif /* CONFIG_MATH_EMULATION */ 984 985 /* Try to emulate it if we should. */ 986 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 987 switch (emulate_instruction(regs)) { 988 case 0: 989 regs->nip += 4; 990 emulate_single_step(regs); 991 return; 992 case -EFAULT: 993 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 994 return; 995 } 996 } 997 998 if (reason & REASON_PRIVILEGED) 999 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1000 else 1001 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1002 } 1003 1004 void alignment_exception(struct pt_regs *regs) 1005 { 1006 int sig, code, fixed = 0; 1007 1008 /* we don't implement logging of alignment exceptions */ 1009 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) 1010 fixed = fix_alignment(regs); 1011 1012 if (fixed == 1) { 1013 regs->nip += 4; /* skip over emulated instruction */ 1014 emulate_single_step(regs); 1015 return; 1016 } 1017 1018 /* Operand address was bad */ 1019 if (fixed == -EFAULT) { 1020 sig = SIGSEGV; 1021 code = SEGV_ACCERR; 1022 } else { 1023 sig = SIGBUS; 1024 code = BUS_ADRALN; 1025 } 1026 if (user_mode(regs)) 1027 _exception(sig, regs, code, regs->dar); 1028 else 1029 bad_page_fault(regs, regs->dar, sig); 1030 } 1031 1032 void StackOverflow(struct pt_regs *regs) 1033 { 1034 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", 1035 current, regs->gpr[1]); 1036 debugger(regs); 1037 show_regs(regs); 1038 panic("kernel stack overflow"); 1039 } 1040 1041 void nonrecoverable_exception(struct pt_regs *regs) 1042 { 1043 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", 
1044 regs->nip, regs->msr); 1045 debugger(regs); 1046 die("nonrecoverable exception", regs, SIGKILL); 1047 } 1048 1049 void trace_syscall(struct pt_regs *regs) 1050 { 1051 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 1052 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0], 1053 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 1054 } 1055 1056 void kernel_fp_unavailable_exception(struct pt_regs *regs) 1057 { 1058 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1059 "%lx at %lx\n", regs->trap, regs->nip); 1060 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1061 } 1062 1063 void altivec_unavailable_exception(struct pt_regs *regs) 1064 { 1065 if (user_mode(regs)) { 1066 /* A user program has executed an altivec instruction, 1067 but this kernel doesn't support altivec. */ 1068 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1069 return; 1070 } 1071 1072 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1073 "%lx at %lx\n", regs->trap, regs->nip); 1074 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1075 } 1076 1077 void vsx_unavailable_exception(struct pt_regs *regs) 1078 { 1079 if (user_mode(regs)) { 1080 /* A user program has executed an vsx instruction, 1081 but this kernel doesn't support vsx. 
*/ 1082 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1083 return; 1084 } 1085 1086 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception " 1087 "%lx at %lx\n", regs->trap, regs->nip); 1088 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1089 } 1090 1091 void performance_monitor_exception(struct pt_regs *regs) 1092 { 1093 __get_cpu_var(irq_stat).pmu_irqs++; 1094 1095 perf_irq(regs); 1096 } 1097 1098 #ifdef CONFIG_8xx 1099 void SoftwareEmulation(struct pt_regs *regs) 1100 { 1101 extern int do_mathemu(struct pt_regs *); 1102 extern int Soft_emulate_8xx(struct pt_regs *); 1103 #if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU) 1104 int errcode; 1105 #endif 1106 1107 CHECK_FULL_REGS(regs); 1108 1109 if (!user_mode(regs)) { 1110 debugger(regs); 1111 die("Kernel Mode Software FPU Emulation", regs, SIGFPE); 1112 } 1113 1114 #ifdef CONFIG_MATH_EMULATION 1115 errcode = do_mathemu(regs); 1116 if (errcode >= 0) 1117 PPC_WARN_EMULATED(math, regs); 1118 1119 switch (errcode) { 1120 case 0: 1121 emulate_single_step(regs); 1122 return; 1123 case 1: { 1124 int code = 0; 1125 code = __parse_fpscr(current->thread.fpscr.val); 1126 _exception(SIGFPE, regs, code, regs->nip); 1127 return; 1128 } 1129 case -EFAULT: 1130 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1131 return; 1132 default: 1133 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1134 return; 1135 } 1136 1137 #elif defined(CONFIG_8XX_MINIMAL_FPEMU) 1138 errcode = Soft_emulate_8xx(regs); 1139 if (errcode >= 0) 1140 PPC_WARN_EMULATED(8xx, regs); 1141 1142 switch (errcode) { 1143 case 0: 1144 emulate_single_step(regs); 1145 return; 1146 case 1: 1147 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1148 return; 1149 case -EFAULT: 1150 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1151 return; 1152 } 1153 #else 1154 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1155 #endif 1156 } 1157 #endif /* CONFIG_8xx */ 1158 1159 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1160 static void 
handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		/* data address compare 1 hit: disarm DAC1 */
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		/* data address compare 2 hit: disarm DAC2 */
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		/* instruction address compare 1 hit: disarm IAC1 */
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	/* push the (possibly cleared) event bits back to the hardware */
	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}

/*
 * BookE debug interrupt handler. Records the debug status in the
 * thread struct, special-cases branch-taken (DBSR_BT) and instruction
 * complete (DBSR_IC) events, and defers everything else to
 * handle_debug() above.
 */
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* re-enable MSR_DE only if other events remain armed */
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
#endif
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
/*
 * Fallback handler for the thermal-assist-unit interrupt when the
 * dedicated TAU interrupt handling is not configured: just log it.
 */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
/*
 * AltiVec assist exception: the CPU asked for software help with a
 * vector instruction (e.g. a denormalized operand). Fatal in kernel
 * mode; in user mode we try to emulate the instruction.
 */
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	/* sync the live vector registers into the thread struct first */
	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * VSX assist exception: no emulation is provided, so kernel mode is
 * fatal and user mode gets SIGILL after the VSX state is flushed.
 */
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
1339 #endif /* CONFIG_VSX */ 1340 1341 #ifdef CONFIG_FSL_BOOKE 1342 void CacheLockingException(struct pt_regs *regs, unsigned long address, 1343 unsigned long error_code) 1344 { 1345 /* We treat cache locking instructions from the user 1346 * as priv ops, in the future we could try to do 1347 * something smarter 1348 */ 1349 if (error_code & (ESR_DLK|ESR_ILK)) 1350 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1351 return; 1352 } 1353 #endif /* CONFIG_FSL_BOOKE */ 1354 1355 #ifdef CONFIG_SPE 1356 void SPEFloatingPointException(struct pt_regs *regs) 1357 { 1358 extern int do_spe_mathemu(struct pt_regs *regs); 1359 unsigned long spefscr; 1360 int fpexc_mode; 1361 int code = 0; 1362 int err; 1363 1364 preempt_disable(); 1365 if (regs->msr & MSR_SPE) 1366 giveup_spe(current); 1367 preempt_enable(); 1368 1369 spefscr = current->thread.spefscr; 1370 fpexc_mode = current->thread.fpexc_mode; 1371 1372 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1373 code = FPE_FLTOVF; 1374 } 1375 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1376 code = FPE_FLTUND; 1377 } 1378 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1379 code = FPE_FLTDIV; 1380 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1381 code = FPE_FLTINV; 1382 } 1383 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1384 code = FPE_FLTRES; 1385 1386 err = do_spe_mathemu(regs); 1387 if (err == 0) { 1388 regs->nip += 4; /* skip emulated instruction */ 1389 emulate_single_step(regs); 1390 return; 1391 } 1392 1393 if (err == -EFAULT) { 1394 /* got an error reading the instruction */ 1395 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip); 1396 } else if (err == -EINVAL) { 1397 /* didn't recognize the instruction */ 1398 printk(KERN_ERR "unrecognized spe instruction " 1399 "in %s at %lx\n", current->comm, regs->nip); 1400 } else { 1401 _exception(SIGFPE, regs, code, regs->nip); 1402 } 1403 1404 return; 1405 } 1406 
/*
 * SPE rounding exception: re-run the offending instruction through the
 * software rounding handler.
 */
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* flush live SPE state into the thread struct first */
	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	/* back up to the faulting instruction for the handler */
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* NOTE(review): on the error paths below nip is left pointing at
	 * the faulting instruction (still decremented) — confirm this is
	 * the intended resume address for the signal. */
	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

/* Log the watchdog exception and dispatch to the (possibly overridden,
 * __weak) handler above. */
void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

/* Nothing to set up at boot for this architecture's trap handling. */
void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

/* build one named entry of the ppc_emulated stats table */
#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

/* Per-instruction-class counters of emulated instructions; the members
 * present depend on which emulation options are configured. */
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
};

/* user-writable (via debugfs "do_warn") switch for emulation warnings */
u32 ppc_warn_emulated;

/* Rate-limited warning that a task used an emulated instruction. */
void ppc_warn_emulated_print(const char *type)
{
	if (printk_ratelimit())
		pr_warning("%s used emulated %s instruction\n", current->comm,
			   type);
}

/*
 * Expose the emulation counters under
 * <debugfs>/powerpc/emulated_instructions/, one u32 file per entry
 * plus the "do_warn" switch. On any failure the whole directory is
 * removed again.
 */
static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	/* view the struct as a flat array of entries for iteration */
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */