/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>

extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx, int has_fpu);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);


static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx : %0*lx\n", field, regs->acx);
#endif
	printk("Hi : %0*lx\n", field, regs->hi);
	printk("Lo : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk(" %s\n", print_tainted());
	printk("ra : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char * str, const struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now. Fixme, this searches the wrong table ... */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
"Data" : "Instruction", 434 field, regs->cp0_epc, field, regs->regs[31]); 435 if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0) 436 == NOTIFY_STOP) 437 return; 438 439 die_if_kernel("Oops", regs); 440 force_sig(SIGBUS, current); 441 } 442 443 /* 444 * ll/sc, rdhwr, sync emulation 445 */ 446 447 #define OPCODE 0xfc000000 448 #define BASE 0x03e00000 449 #define RT 0x001f0000 450 #define OFFSET 0x0000ffff 451 #define LL 0xc0000000 452 #define SC 0xe0000000 453 #define SPEC0 0x00000000 454 #define SPEC3 0x7c000000 455 #define RD 0x0000f800 456 #define FUNC 0x0000003f 457 #define SYNC 0x0000000f 458 #define RDHWR 0x0000003b 459 460 /* 461 * The ll_bit is cleared by r*_switch.S 462 */ 463 464 unsigned long ll_bit; 465 466 static struct task_struct *ll_task = NULL; 467 468 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) 469 { 470 unsigned long value, __user *vaddr; 471 long offset; 472 473 /* 474 * analyse the ll instruction that just caused a ri exception 475 * and put the referenced address to addr. 476 */ 477 478 /* sign extend offset */ 479 offset = opcode & OFFSET; 480 offset <<= 16; 481 offset >>= 16; 482 483 vaddr = (unsigned long __user *) 484 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 485 486 if ((unsigned long)vaddr & 3) 487 return SIGBUS; 488 if (get_user(value, vaddr)) 489 return SIGSEGV; 490 491 preempt_disable(); 492 493 if (ll_task == NULL || ll_task == current) { 494 ll_bit = 1; 495 } else { 496 ll_bit = 0; 497 } 498 ll_task = current; 499 500 preempt_enable(); 501 502 regs->regs[(opcode & RT) >> 16] = value; 503 504 return 0; 505 } 506 507 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) 508 { 509 unsigned long __user *vaddr; 510 unsigned long reg; 511 long offset; 512 513 /* 514 * analyse the sc instruction that just caused a ri exception 515 * and put the referenced address to addr. 516 */ 517 518 /* sign extend offset */ 519 offset = opcode & OFFSET; 520 offset <<= 16; 521 offset >>= 16; 522 523 vaddr = (unsigned long __user *) 524 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 525 reg = (opcode & RT) >> 16; 526 527 if ((unsigned long)vaddr & 3) 528 return SIGBUS; 529 530 preempt_disable(); 531 532 if (ll_bit == 0 || ll_task != current) { 533 regs->regs[reg] = 0; 534 preempt_enable(); 535 return 0; 536 } 537 538 preempt_enable(); 539 540 if (put_user(regs->regs[reg], vaddr)) 541 return SIGSEGV; 542 543 regs->regs[reg] = 1; 544 545 return 0; 546 } 547 548 /* 549 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both 550 * opcodes are supposed to result in coprocessor unusable exceptions if 551 * executed on ll/sc-less processors. That's the theory. In practice a 552 * few processors such as NEC's VR4100 throw reserved instruction exceptions 553 * instead, so we're doing the emulation thing in both exception handlers. 554 */ 555 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) 556 { 557 if ((opcode & OPCODE) == LL) 558 return simulate_ll(regs, opcode); 559 if ((opcode & OPCODE) == SC) 560 return simulate_sc(regs, opcode); 561 562 return -1; /* Must be something else ... */ 563 } 564 565 /* 566 * Simulate trapping 'rdhwr' instructions to provide user accessible 567 * registers not implemented in hardware. 
568 */ 569 static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) 570 { 571 struct thread_info *ti = task_thread_info(current); 572 573 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 574 int rd = (opcode & RD) >> 11; 575 int rt = (opcode & RT) >> 16; 576 switch (rd) { 577 case 0: /* CPU number */ 578 regs->regs[rt] = smp_processor_id(); 579 return 0; 580 case 1: /* SYNCI length */ 581 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 582 current_cpu_data.icache.linesz); 583 return 0; 584 case 2: /* Read count register */ 585 regs->regs[rt] = read_c0_count(); 586 return 0; 587 case 3: /* Count register resolution */ 588 switch (current_cpu_data.cputype) { 589 case CPU_20KC: 590 case CPU_25KF: 591 regs->regs[rt] = 1; 592 break; 593 default: 594 regs->regs[rt] = 2; 595 } 596 return 0; 597 case 29: 598 regs->regs[rt] = ti->tp_value; 599 return 0; 600 default: 601 return -1; 602 } 603 } 604 605 /* Not ours. */ 606 return -1; 607 } 608 609 static int simulate_sync(struct pt_regs *regs, unsigned int opcode) 610 { 611 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) 612 return 0; 613 614 return -1; /* Must be something else ... */ 615 } 616 617 asmlinkage void do_ov(struct pt_regs *regs) 618 { 619 siginfo_t info; 620 621 die_if_kernel("Integer overflow", regs); 622 623 info.si_code = FPE_INTOVF; 624 info.si_signo = SIGFPE; 625 info.si_errno = 0; 626 info.si_addr = (void __user *) regs->cp0_epc; 627 force_sig_info(SIGFPE, &info, current); 628 } 629 630 /* 631 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 632 */ 633 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 634 { 635 siginfo_t info; 636 637 if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0) 638 == NOTIFY_STOP) 639 return; 640 die_if_kernel("FP exception in kernel code", regs); 641 642 if (fcr31 & FPU_CSR_UNI_X) { 643 int sig; 644 645 /* 646 * Unimplemented operation exception. If we've got the full 647 * software emulator on-board, let's use it... 648 * 649 * Force FPU to dump state into task/thread context. We're 650 * moving a lot of data here for what is probably a single 651 * instruction, but the alternative is to pre-decode the FP 652 * register operands before invoking the emulator, which seems 653 * a bit extreme for what should be an infrequent event. 654 */ 655 /* Ensure 'resume' not overwrite saved fp context again. */ 656 lose_fpu(1); 657 658 /* Run the emulator */ 659 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1); 660 661 /* 662 * We can't allow the emulated instruction to leave any of 663 * the cause bit set in $fcr31. 664 */ 665 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 666 667 /* Restore the hardware register state */ 668 own_fpu(1); /* Using the FPU again. 

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

	if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures. Weird ...
	 * But should we continue the brokenness??? --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic. --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode >= (1 << 10))
		bcode >>= 10;

	do_trap_or_bp(regs, bcode, "Break");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/* Immediate versions don't provide a code. */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	do_trap_or_bp(regs, tcode, "Trap");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned int opcode = 0;
	int status = -1;

	if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		return;

	if (unlikely(get_user(opcode, epc) < 0))
		status = SIGSEGV;

	if (!cpu_has_llsc && status < 0)
		status = simulate_llsc(regs, opcode);

	if (status < 0)
		status = simulate_rdhwr(regs, opcode);

	if (status < 0)
		status = simulate_sync(regs, opcode);

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over. */
		force_sig(status, current);
	}
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				 mt_fpu_cpumask);
			set_cpus_allowed(current, tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int __user *epc;
	unsigned long old_epc;
	unsigned int opcode;
	unsigned int cpid;
	int status;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			return;

		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr(regs, opcode);

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over. */
			force_sig(status, current);
		}

		return;

	case 1:
		if (used_math())	/* Using the FPU again. */
			own_fpu(1);
		else {			/* First time FPU user. */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			int sig;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu, 0);
			if (sig)
				force_sig(sig, current);
			else
				mt_ase_fp_affinity();
		}

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	u32 cause;

	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP. Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		force_sig(SIGTRAP, current);
	} else
		mips_clear_watch_registers();
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
"EE " : "", 1155 reg_val & (1<<25) ? "EB " : "", 1156 reg_val & (1<<24) ? "EI " : "", 1157 reg_val & (1<<23) ? "E1 " : "", 1158 reg_val & (1<<22) ? "E0 " : ""); 1159 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1160 1161 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1162 if (reg_val & (1<<22)) 1163 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); 1164 1165 if (reg_val & (1<<23)) 1166 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); 1167 #endif 1168 1169 panic("Can't handle the cache error!"); 1170 } 1171 1172 /* 1173 * SDBBP EJTAG debug exception handler. 1174 * We skip the instruction and return to the next instruction. 1175 */ 1176 void ejtag_exception_handler(struct pt_regs *regs) 1177 { 1178 const int field = 2 * sizeof(unsigned long); 1179 unsigned long depc, old_epc; 1180 unsigned int debug; 1181 1182 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1183 depc = read_c0_depc(); 1184 debug = read_c0_debug(); 1185 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); 1186 if (debug & 0x80000000) { 1187 /* 1188 * In branch delay slot. 1189 * We cheat a little bit here and use EPC to calculate the 1190 * debug return address (DEPC). EPC is restored after the 1191 * calculation. 1192 */ 1193 old_epc = regs->cp0_epc; 1194 regs->cp0_epc = depc; 1195 __compute_return_epc(regs); 1196 depc = regs->cp0_epc; 1197 regs->cp0_epc = old_epc; 1198 } else 1199 depc += 4; 1200 write_c0_depc(depc); 1201 1202 #if 0 1203 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); 1204 write_c0_debug(debug | 0x100); 1205 #endif 1206 } 1207 1208 /* 1209 * NMI exception handler. 1210 */ 1211 NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs) 1212 { 1213 bust_spinlocks(1); 1214 printk("NMI taken!!!!\n"); 1215 die("NMI", regs); 1216 } 1217 1218 #define VECTORSPACING 0x100 /* for EI/VI mode */ 1219 1220 unsigned long ebase; 1221 unsigned long exception_handlers[32]; 1222 unsigned long vi_handlers[64]; 1223 1224 /* 1225 * As a side effect of the way this is implemented we're limited 1226 * to interrupt handlers in the address range from 1227 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ... 
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(u32 *)(ebase + 0x200) = 0x08000000 |
					  (0x03ffffff & (handler >> 2));
		local_flush_icache_range(ebase + 0x200, ebase + 0x204);
	}
	return (void *)old_handler;
}

static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u32 *w;
	unsigned char *b;

	if (!cpu_has_veic && !cpu_has_vint)
		BUG();

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = (cpu_wait == r4k_wait) ?
			&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - vec_start;
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handler's responsibility to save registers if required
		 * (e.g. hi/lo) and return from the exception using "eret"
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
		*w = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif
}

#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

static int __cpuinitdata noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

void __cpuinit per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of Each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_mips_r2) {
		unsigned int enable = 0x0000000f;

		if (!noulri && cpu_has_userlocal)
			enable |= (1 << 29);

		write_c0_hwrena(enable);
	}

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	if (cpu_has_veic || cpu_has_vint) {
		write_c0_ebase(ebase);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2) {
		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_perfcount_irq = -1;
	}

#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	} else if (!secondaryTC) {
		/*
		 * First TC in non-boot VPE must do subset of tlb_init()
		 * for MMU control registers.
		 */
		write_c0_pagemask(PM_DEFAULT_MASK);
		write_c0_wired(0);
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}

/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __cpuinitdata =
	"Trying to set NULL cache error exception handler";

/* Install uncached CPU exception handler */
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
#ifdef CONFIG_32BIT
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
	unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif
	if (cpu_has_mips_r2)
		ebase += (read_c0_ebase() & 0x3ffff000);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;
	int rollback;

	check_wait();
	rollback = (cpu_wait == r4k_wait);

#if defined(CONFIG_KGDB)
	if (kgdb_early_setup)
		return;	/* Already done */
#endif

	if (cpu_has_veic || cpu_has_vint)
		ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
	else {
		ebase = CAC_BASE;
		if (cpu_has_mips_r2)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, rollback ? rollback_handle_int : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception. The handlers have not been
		 * written yet. Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}


	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	local_flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();

	sort_extable(__start___dbe_table, __stop___dbe_table);
}