1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle 7 * Copyright (C) 1995, 1996 Paul M. Antoine 8 * Copyright (C) 1998 Ulf Carlsson 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 11 * Copyright (C) 2000, 01 MIPS Technologies, Inc. 12 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 13 */ 14 #include <linux/bug.h> 15 #include <linux/compiler.h> 16 #include <linux/init.h> 17 #include <linux/mm.h> 18 #include <linux/module.h> 19 #include <linux/sched.h> 20 #include <linux/smp.h> 21 #include <linux/spinlock.h> 22 #include <linux/kallsyms.h> 23 #include <linux/bootmem.h> 24 #include <linux/interrupt.h> 25 #include <linux/ptrace.h> 26 #include <linux/kgdb.h> 27 #include <linux/kdebug.h> 28 29 #include <asm/bootinfo.h> 30 #include <asm/branch.h> 31 #include <asm/break.h> 32 #include <asm/cpu.h> 33 #include <asm/dsp.h> 34 #include <asm/fpu.h> 35 #include <asm/mipsregs.h> 36 #include <asm/mipsmtregs.h> 37 #include <asm/module.h> 38 #include <asm/pgtable.h> 39 #include <asm/ptrace.h> 40 #include <asm/sections.h> 41 #include <asm/system.h> 42 #include <asm/tlbdebug.h> 43 #include <asm/traps.h> 44 #include <asm/uaccess.h> 45 #include <asm/watch.h> 46 #include <asm/mmu_context.h> 47 #include <asm/types.h> 48 #include <asm/stacktrace.h> 49 50 extern void check_wait(void); 51 extern asmlinkage void r4k_wait(void); 52 extern asmlinkage void rollback_handle_int(void); 53 extern asmlinkage void handle_int(void); 54 extern asmlinkage void handle_tlbm(void); 55 extern asmlinkage void handle_tlbl(void); 56 extern asmlinkage void handle_tlbs(void); 57 extern asmlinkage void handle_adel(void); 58 extern asmlinkage void handle_ades(void); 59 extern asmlinkage void handle_ibe(void); 60 extern asmlinkage void handle_dbe(void); 61 extern asmlinkage void handle_sys(void); 62 extern asmlinkage void handle_bp(void); 63 extern asmlinkage void handle_ri(void); 64 extern asmlinkage void handle_ri_rdhwr_vivt(void); 65 extern asmlinkage void handle_ri_rdhwr(void); 66 extern asmlinkage void handle_cpu(void); 67 extern asmlinkage void handle_ov(void); 68 extern asmlinkage void handle_tr(void); 69 extern asmlinkage void handle_fpe(void); 70 extern asmlinkage void handle_mdmx(void); 71 extern asmlinkage void handle_watch(void); 72 extern asmlinkage void handle_mt(void); 73 extern asmlinkage void handle_dsp(void); 74 extern asmlinkage void handle_mcheck(void); 75 extern asmlinkage void handle_reserved(void); 76 77 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 78 struct mips_fpu_struct *ctx, int has_fpu); 79 80 void (*board_be_init)(void); 81 int (*board_be_handler)(struct pt_regs *regs, int is_fixup); 82 void (*board_nmi_handler_setup)(void); 83 void (*board_ejtag_handler_setup)(void); 84 void (*board_bind_eic_interrupt)(int irq, int regset); 85 86 87 static void show_raw_backtrace(unsigned long reg29) 88 { 89 unsigned long *sp = (unsigned long *)(reg29 & ~3); 90 unsigned long addr; 91 92 printk("Call Trace:"); 93 #ifdef CONFIG_KALLSYMS 94 printk("\n"); 95 #endif 96 while (!kstack_end(sp)) { 97 unsigned long __user *p = 98 (unsigned long __user *)(unsigned long)sp++; 99 if (__get_user(addr, p)) { 100 printk(" (Bad stack address)"); 101 break; 102 } 103 if (__kernel_text_address(addr)) 104 print_ip_sym(addr); 105 } 106 printk("\n"); 107 } 108 109 #ifdef CONFIG_KALLSYMS 110 int raw_show_trace; 111 static int __init set_raw_show_trace(char *str) 112 { 113 raw_show_trace = 1; 114 return 1; 115 } 116 __setup("raw_show_trace", set_raw_show_trace); 117 #endif 118 119 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) 120 { 121 unsigned long sp = regs->regs[29]; 122 unsigned long ra = regs->regs[31]; 123 unsigned long pc = regs->cp0_epc; 124 125 if (raw_show_trace || !__kernel_text_address(pc)) { 126 show_raw_backtrace(sp); 127 return; 128 } 129 printk("Call Trace:\n"); 130 do { 131 print_ip_sym(pc); 132 pc = unwind_stack(task, &sp, pc, &ra); 133 } while (pc); 134 printk("\n"); 135 } 136 137 /* 138 * This routine abuses get_user()/put_user() to reference pointers 139 * with at least a bit of error checking ... 140 */ 141 static void show_stacktrace(struct task_struct *task, 142 const struct pt_regs *regs) 143 { 144 const int field = 2 * sizeof(unsigned long); 145 long stackdata; 146 int i; 147 unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; 148 149 printk("Stack :"); 150 i = 0; 151 while ((unsigned long) sp & (PAGE_SIZE - 1)) { 152 if (i && ((i % (64 / field)) == 0)) 153 printk("\n "); 154 if (i > 39) { 155 printk(" ..."); 156 break; 157 } 158 159 if (__get_user(stackdata, sp++)) { 160 printk(" (Bad stack address)"); 161 break; 162 } 163 164 printk(" %0*lx", field, stackdata); 165 i++; 166 } 167 printk("\n"); 168 show_backtrace(task, regs); 169 } 170 171 void show_stack(struct task_struct *task, unsigned long *sp) 172 { 173 struct pt_regs regs; 174 if (sp) { 175 regs.regs[29] = (unsigned long)sp; 176 regs.regs[31] = 0; 177 regs.cp0_epc = 0; 178 } else { 179 if (task && task != current) { 180 regs.regs[29] = task->thread.reg29; 181 regs.regs[31] = 0; 182 regs.cp0_epc = task->thread.reg31; 183 } else { 184 prepare_frametrace(®s); 185 } 186 } 187 show_stacktrace(task, ®s); 188 } 189 190 /* 191 * The architecture-independent dump_stack generator 192 */ 193 void dump_stack(void) 194 { 195 struct pt_regs regs; 196 197 prepare_frametrace(®s); 198 show_backtrace(current, ®s); 199 } 200 201 EXPORT_SYMBOL(dump_stack); 202 203 static void show_code(unsigned int __user *pc) 204 { 205 long i; 206 unsigned short __user *pc16 = NULL; 207 208 printk("\nCode:"); 209 210 if ((unsigned long)pc & 1) 211 pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 212 for(i = -3 ; i < 6 ; i++) { 213 unsigned int insn; 214 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 215 printk(" (Bad address in epc)\n"); 216 break; 217 } 218 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 219 } 220 } 221 222 static void __show_regs(const struct pt_regs *regs) 223 { 224 const int field = 2 * sizeof(unsigned long); 225 unsigned int cause = regs->cp0_cause; 226 int i; 227 228 printk("Cpu %d\n", smp_processor_id()); 229 230 /* 231 * Saved main processor registers 232 */ 233 for (i = 0; i < 32; ) { 234 if ((i % 4) == 0) 235 printk("$%2d :", i); 236 if (i == 0) 237 printk(" %0*lx", field, 0UL); 238 else if (i == 26 || i == 27) 239 printk(" %*s", field, ""); 240 else 241 printk(" %0*lx", field, regs->regs[i]); 242 243 i++; 244 if ((i % 4) == 0) 245 printk("\n"); 246 } 247 248 #ifdef CONFIG_CPU_HAS_SMARTMIPS 249 printk("Acx : %0*lx\n", field, regs->acx); 250 #endif 251 printk("Hi : %0*lx\n", field, regs->hi); 252 printk("Lo : %0*lx\n", field, regs->lo); 253 254 /* 255 * Saved cp0 registers 256 */ 257 printk("epc : %0*lx %pS\n", field, regs->cp0_epc, 258 (void *) regs->cp0_epc); 259 printk(" %s\n", print_tainted()); 260 printk("ra : %0*lx %pS\n", field, regs->regs[31], 261 (void *) regs->regs[31]); 262 263 printk("Status: %08x ", (uint32_t) regs->cp0_status); 264 265 if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { 266 if (regs->cp0_status & ST0_KUO) 267 printk("KUo "); 268 if (regs->cp0_status & ST0_IEO) 269 printk("IEo "); 270 if (regs->cp0_status & ST0_KUP) 271 printk("KUp "); 272 if (regs->cp0_status & ST0_IEP) 273 printk("IEp "); 274 if (regs->cp0_status & ST0_KUC) 275 printk("KUc "); 276 if (regs->cp0_status & ST0_IEC) 277 printk("IEc "); 278 } else { 279 if (regs->cp0_status & ST0_KX) 280 printk("KX "); 281 if (regs->cp0_status & ST0_SX) 282 printk("SX "); 283 if (regs->cp0_status & ST0_UX) 284 printk("UX "); 285 switch (regs->cp0_status & ST0_KSU) { 286 case KSU_USER: 287 printk("USER "); 288 break; 289 case KSU_SUPERVISOR: 290 printk("SUPERVISOR "); 291 break; 292 case KSU_KERNEL: 293 printk("KERNEL "); 294 break; 295 default: 296 printk("BAD_MODE "); 297 break; 298 } 299 if (regs->cp0_status & ST0_ERL) 300 printk("ERL "); 301 if (regs->cp0_status & ST0_EXL) 302 printk("EXL "); 303 if (regs->cp0_status & ST0_IE) 304 printk("IE "); 305 } 306 printk("\n"); 307 308 printk("Cause : %08x\n", cause); 309 310 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 311 if (1 <= cause && cause <= 5) 312 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); 313 314 printk("PrId : %08x (%s)\n", read_c0_prid(), 315 cpu_name_string()); 316 } 317 318 /* 319 * FIXME: really the generic show_regs should take a const pointer argument. 320 */ 321 void show_regs(struct pt_regs *regs) 322 { 323 __show_regs((struct pt_regs *)regs); 324 } 325 326 void show_registers(const struct pt_regs *regs) 327 { 328 const int field = 2 * sizeof(unsigned long); 329 330 __show_regs(regs); 331 print_modules(); 332 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", 333 current->comm, current->pid, current_thread_info(), current, 334 field, current_thread_info()->tp_value); 335 if (cpu_has_userlocal) { 336 unsigned long tls; 337 338 tls = read_c0_userlocal(); 339 if (tls != current_thread_info()->tp_value) 340 printk("*HwTLS: %0*lx\n", field, tls); 341 } 342 343 show_stacktrace(current, regs); 344 show_code((unsigned int __user *) regs->cp0_epc); 345 printk("\n"); 346 } 347 348 static DEFINE_SPINLOCK(die_lock); 349 350 void __noreturn die(const char * str, const struct pt_regs * regs) 351 { 352 static int die_counter; 353 #ifdef CONFIG_MIPS_MT_SMTC 354 unsigned long dvpret = dvpe(); 355 #endif /* CONFIG_MIPS_MT_SMTC */ 356 357 console_verbose(); 358 spin_lock_irq(&die_lock); 359 bust_spinlocks(1); 360 #ifdef CONFIG_MIPS_MT_SMTC 361 mips_mt_regdump(dvpret); 362 #endif /* CONFIG_MIPS_MT_SMTC */ 363 printk("%s[#%d]:\n", str, ++die_counter); 364 show_registers(regs); 365 add_taint(TAINT_DIE); 366 spin_unlock_irq(&die_lock); 367 368 if (in_interrupt()) 369 panic("Fatal exception in interrupt"); 370 371 if (panic_on_oops) { 372 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 373 ssleep(5); 374 panic("Fatal exception"); 375 } 376 377 do_exit(SIGSEGV); 378 } 379 380 extern struct exception_table_entry __start___dbe_table[]; 381 extern struct exception_table_entry __stop___dbe_table[]; 382 383 __asm__( 384 " .section __dbe_table, \"a\"\n" 385 " .previous \n"); 386 387 /* Given an address, look for it in the exception tables. */ 388 static const struct exception_table_entry *search_dbe_tables(unsigned long addr) 389 { 390 const struct exception_table_entry *e; 391 392 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); 393 if (!e) 394 e = search_module_dbetables(addr); 395 return e; 396 } 397 398 asmlinkage void do_be(struct pt_regs *regs) 399 { 400 const int field = 2 * sizeof(unsigned long); 401 const struct exception_table_entry *fixup = NULL; 402 int data = regs->cp0_cause & 4; 403 int action = MIPS_BE_FATAL; 404 405 /* XXX For now. Fixme, this searches the wrong table ... */ 406 if (data && !user_mode(regs)) 407 fixup = search_dbe_tables(exception_epc(regs)); 408 409 if (fixup) 410 action = MIPS_BE_FIXUP; 411 412 if (board_be_handler) 413 action = board_be_handler(regs, fixup != NULL); 414 415 switch (action) { 416 case MIPS_BE_DISCARD: 417 return; 418 case MIPS_BE_FIXUP: 419 if (fixup) { 420 regs->cp0_epc = fixup->nextinsn; 421 return; 422 } 423 break; 424 default: 425 break; 426 } 427 428 /* 429 * Assume it would be too dangerous to continue ... 430 */ 431 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", 432 data ? "Data" : "Instruction", 433 field, regs->cp0_epc, field, regs->regs[31]); 434 if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0) 435 == NOTIFY_STOP) 436 return; 437 438 die_if_kernel("Oops", regs); 439 force_sig(SIGBUS, current); 440 } 441 442 /* 443 * ll/sc, rdhwr, sync emulation 444 */ 445 446 #define OPCODE 0xfc000000 447 #define BASE 0x03e00000 448 #define RT 0x001f0000 449 #define OFFSET 0x0000ffff 450 #define LL 0xc0000000 451 #define SC 0xe0000000 452 #define SPEC0 0x00000000 453 #define SPEC3 0x7c000000 454 #define RD 0x0000f800 455 #define FUNC 0x0000003f 456 #define SYNC 0x0000000f 457 #define RDHWR 0x0000003b 458 459 /* 460 * The ll_bit is cleared by r*_switch.S 461 */ 462 463 unsigned long ll_bit; 464 465 static struct task_struct *ll_task = NULL; 466 467 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) 468 { 469 unsigned long value, __user *vaddr; 470 long offset; 471 472 /* 473 * analyse the ll instruction that just caused a ri exception 474 * and put the referenced address to addr. 475 */ 476 477 /* sign extend offset */ 478 offset = opcode & OFFSET; 479 offset <<= 16; 480 offset >>= 16; 481 482 vaddr = (unsigned long __user *) 483 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 484 485 if ((unsigned long)vaddr & 3) 486 return SIGBUS; 487 if (get_user(value, vaddr)) 488 return SIGSEGV; 489 490 preempt_disable(); 491 492 if (ll_task == NULL || ll_task == current) { 493 ll_bit = 1; 494 } else { 495 ll_bit = 0; 496 } 497 ll_task = current; 498 499 preempt_enable(); 500 501 regs->regs[(opcode & RT) >> 16] = value; 502 503 return 0; 504 } 505 506 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) 507 { 508 unsigned long __user *vaddr; 509 unsigned long reg; 510 long offset; 511 512 /* 513 * analyse the sc instruction that just caused a ri exception 514 * and put the referenced address to addr. 515 */ 516 517 /* sign extend offset */ 518 offset = opcode & OFFSET; 519 offset <<= 16; 520 offset >>= 16; 521 522 vaddr = (unsigned long __user *) 523 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); 524 reg = (opcode & RT) >> 16; 525 526 if ((unsigned long)vaddr & 3) 527 return SIGBUS; 528 529 preempt_disable(); 530 531 if (ll_bit == 0 || ll_task != current) { 532 regs->regs[reg] = 0; 533 preempt_enable(); 534 return 0; 535 } 536 537 preempt_enable(); 538 539 if (put_user(regs->regs[reg], vaddr)) 540 return SIGSEGV; 541 542 regs->regs[reg] = 1; 543 544 return 0; 545 } 546 547 /* 548 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both 549 * opcodes are supposed to result in coprocessor unusable exceptions if 550 * executed on ll/sc-less processors. That's the theory. In practice a 551 * few processors such as NEC's VR4100 throw reserved instruction exceptions 552 * instead, so we're doing the emulation thing in both exception handlers. 553 */ 554 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) 555 { 556 if ((opcode & OPCODE) == LL) 557 return simulate_ll(regs, opcode); 558 if ((opcode & OPCODE) == SC) 559 return simulate_sc(regs, opcode); 560 561 return -1; /* Must be something else ... */ 562 } 563 564 /* 565 * Simulate trapping 'rdhwr' instructions to provide user accessible 566 * registers not implemented in hardware. 567 */ 568 static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) 569 { 570 struct thread_info *ti = task_thread_info(current); 571 572 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { 573 int rd = (opcode & RD) >> 11; 574 int rt = (opcode & RT) >> 16; 575 switch (rd) { 576 case 0: /* CPU number */ 577 regs->regs[rt] = smp_processor_id(); 578 return 0; 579 case 1: /* SYNCI length */ 580 regs->regs[rt] = min(current_cpu_data.dcache.linesz, 581 current_cpu_data.icache.linesz); 582 return 0; 583 case 2: /* Read count register */ 584 regs->regs[rt] = read_c0_count(); 585 return 0; 586 case 3: /* Count register resolution */ 587 switch (current_cpu_data.cputype) { 588 case CPU_20KC: 589 case CPU_25KF: 590 regs->regs[rt] = 1; 591 break; 592 default: 593 regs->regs[rt] = 2; 594 } 595 return 0; 596 case 29: 597 regs->regs[rt] = ti->tp_value; 598 return 0; 599 default: 600 return -1; 601 } 602 } 603 604 /* Not ours. */ 605 return -1; 606 } 607 608 static int simulate_sync(struct pt_regs *regs, unsigned int opcode) 609 { 610 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) 611 return 0; 612 613 return -1; /* Must be something else ... */ 614 } 615 616 asmlinkage void do_ov(struct pt_regs *regs) 617 { 618 siginfo_t info; 619 620 die_if_kernel("Integer overflow", regs); 621 622 info.si_code = FPE_INTOVF; 623 info.si_signo = SIGFPE; 624 info.si_errno = 0; 625 info.si_addr = (void __user *) regs->cp0_epc; 626 force_sig_info(SIGFPE, &info, current); 627 } 628 629 /* 630 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 631 */ 632 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) 633 { 634 siginfo_t info; 635 636 if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0) 637 == NOTIFY_STOP) 638 return; 639 die_if_kernel("FP exception in kernel code", regs); 640 641 if (fcr31 & FPU_CSR_UNI_X) { 642 int sig; 643 644 /* 645 * Unimplemented operation exception. If we've got the full 646 * software emulator on-board, let's use it... 647 * 648 * Force FPU to dump state into task/thread context. We're 649 * moving a lot of data here for what is probably a single 650 * instruction, but the alternative is to pre-decode the FP 651 * register operands before invoking the emulator, which seems 652 * a bit extreme for what should be an infrequent event. 653 */ 654 /* Ensure 'resume' not overwrite saved fp context again. */ 655 lose_fpu(1); 656 657 /* Run the emulator */ 658 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1); 659 660 /* 661 * We can't allow the emulated instruction to leave any of 662 * the cause bit set in $fcr31. 663 */ 664 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; 665 666 /* Restore the hardware register state */ 667 own_fpu(1); /* Using the FPU again. */ 668 669 /* If something went wrong, signal */ 670 if (sig) 671 force_sig(sig, current); 672 673 return; 674 } else if (fcr31 & FPU_CSR_INV_X) 675 info.si_code = FPE_FLTINV; 676 else if (fcr31 & FPU_CSR_DIV_X) 677 info.si_code = FPE_FLTDIV; 678 else if (fcr31 & FPU_CSR_OVF_X) 679 info.si_code = FPE_FLTOVF; 680 else if (fcr31 & FPU_CSR_UDF_X) 681 info.si_code = FPE_FLTUND; 682 else if (fcr31 & FPU_CSR_INE_X) 683 info.si_code = FPE_FLTRES; 684 else 685 info.si_code = __SI_FAULT; 686 info.si_signo = SIGFPE; 687 info.si_errno = 0; 688 info.si_addr = (void __user *) regs->cp0_epc; 689 force_sig_info(SIGFPE, &info, current); 690 } 691 692 static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 693 const char *str) 694 { 695 siginfo_t info; 696 char b[40]; 697 698 if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP) 699 return; 700 701 /* 702 * A short test says that IRIX 5.3 sends SIGTRAP for all trap 703 * insns, even for trap and break codes that indicate arithmetic 704 * failures. Weird ... 705 * But should we continue the brokenness??? --macro 706 */ 707 switch (code) { 708 case BRK_OVERFLOW: 709 case BRK_DIVZERO: 710 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 711 die_if_kernel(b, regs); 712 if (code == BRK_DIVZERO) 713 info.si_code = FPE_INTDIV; 714 else 715 info.si_code = FPE_INTOVF; 716 info.si_signo = SIGFPE; 717 info.si_errno = 0; 718 info.si_addr = (void __user *) regs->cp0_epc; 719 force_sig_info(SIGFPE, &info, current); 720 break; 721 case BRK_BUG: 722 die_if_kernel("Kernel bug detected", regs); 723 force_sig(SIGTRAP, current); 724 break; 725 default: 726 scnprintf(b, sizeof(b), "%s instruction in kernel code", str); 727 die_if_kernel(b, regs); 728 force_sig(SIGTRAP, current); 729 } 730 } 731 732 asmlinkage void do_bp(struct pt_regs *regs) 733 { 734 unsigned int opcode, bcode; 735 736 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 737 goto out_sigsegv; 738 739 /* 740 * There is the ancient bug in the MIPS assemblers that the break 741 * code starts left to bit 16 instead to bit 6 in the opcode. 742 * Gas is bug-compatible, but not always, grrr... 743 * We handle both cases with a simple heuristics. --macro 744 */ 745 bcode = ((opcode >> 6) & ((1 << 20) - 1)); 746 if (bcode >= (1 << 10)) 747 bcode >>= 10; 748 749 do_trap_or_bp(regs, bcode, "Break"); 750 return; 751 752 out_sigsegv: 753 force_sig(SIGSEGV, current); 754 } 755 756 asmlinkage void do_tr(struct pt_regs *regs) 757 { 758 unsigned int opcode, tcode = 0; 759 760 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 761 goto out_sigsegv; 762 763 /* Immediate versions don't provide a code. */ 764 if (!(opcode & OPCODE)) 765 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 766 767 do_trap_or_bp(regs, tcode, "Trap"); 768 return; 769 770 out_sigsegv: 771 force_sig(SIGSEGV, current); 772 } 773 774 asmlinkage void do_ri(struct pt_regs *regs) 775 { 776 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); 777 unsigned long old_epc = regs->cp0_epc; 778 unsigned int opcode = 0; 779 int status = -1; 780 781 if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0) 782 == NOTIFY_STOP) 783 return; 784 785 die_if_kernel("Reserved instruction in kernel code", regs); 786 787 if (unlikely(compute_return_epc(regs) < 0)) 788 return; 789 790 if (unlikely(get_user(opcode, epc) < 0)) 791 status = SIGSEGV; 792 793 if (!cpu_has_llsc && status < 0) 794 status = simulate_llsc(regs, opcode); 795 796 if (status < 0) 797 status = simulate_rdhwr(regs, opcode); 798 799 if (status < 0) 800 status = simulate_sync(regs, opcode); 801 802 if (status < 0) 803 status = SIGILL; 804 805 if (unlikely(status > 0)) { 806 regs->cp0_epc = old_epc; /* Undo skip-over. */ 807 force_sig(status, current); 808 } 809 } 810 811 /* 812 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've 813 * emulated more than some threshold number of instructions, force migration to 814 * a "CPU" that has FP support. 815 */ 816 static void mt_ase_fp_affinity(void) 817 { 818 #ifdef CONFIG_MIPS_MT_FPAFF 819 if (mt_fpemul_threshold > 0 && 820 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { 821 /* 822 * If there's no FPU present, or if the application has already 823 * restricted the allowed set to exclude any CPUs with FPUs, 824 * we'll skip the procedure. 825 */ 826 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 827 cpumask_t tmask; 828 829 current->thread.user_cpus_allowed 830 = current->cpus_allowed; 831 cpus_and(tmask, current->cpus_allowed, 832 mt_fpu_cpumask); 833 set_cpus_allowed(current, tmask); 834 set_thread_flag(TIF_FPUBOUND); 835 } 836 } 837 #endif /* CONFIG_MIPS_MT_FPAFF */ 838 } 839 840 asmlinkage void do_cpu(struct pt_regs *regs) 841 { 842 unsigned int __user *epc; 843 unsigned long old_epc; 844 unsigned int opcode; 845 unsigned int cpid; 846 int status; 847 848 die_if_kernel("do_cpu invoked from kernel context!", regs); 849 850 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; 851 852 switch (cpid) { 853 case 0: 854 epc = (unsigned int __user *)exception_epc(regs); 855 old_epc = regs->cp0_epc; 856 opcode = 0; 857 status = -1; 858 859 if (unlikely(compute_return_epc(regs) < 0)) 860 return; 861 862 if (unlikely(get_user(opcode, epc) < 0)) 863 status = SIGSEGV; 864 865 if (!cpu_has_llsc && status < 0) 866 status = simulate_llsc(regs, opcode); 867 868 if (status < 0) 869 status = simulate_rdhwr(regs, opcode); 870 871 if (status < 0) 872 status = SIGILL; 873 874 if (unlikely(status > 0)) { 875 regs->cp0_epc = old_epc; /* Undo skip-over. */ 876 force_sig(status, current); 877 } 878 879 return; 880 881 case 1: 882 if (used_math()) /* Using the FPU again. */ 883 own_fpu(1); 884 else { /* First time FPU user. */ 885 init_fpu(); 886 set_used_math(); 887 } 888 889 if (!raw_cpu_has_fpu) { 890 int sig; 891 sig = fpu_emulator_cop1Handler(regs, 892 ¤t->thread.fpu, 0); 893 if (sig) 894 force_sig(sig, current); 895 else 896 mt_ase_fp_affinity(); 897 } 898 899 return; 900 901 case 2: 902 case 3: 903 break; 904 } 905 906 force_sig(SIGILL, current); 907 } 908 909 asmlinkage void do_mdmx(struct pt_regs *regs) 910 { 911 force_sig(SIGILL, current); 912 } 913 914 asmlinkage void do_watch(struct pt_regs *regs) 915 { 916 u32 cause; 917 918 /* 919 * Clear WP (bit 22) bit of cause register so we don't loop 920 * forever. 921 */ 922 cause = read_c0_cause(); 923 cause &= ~(1 << 22); 924 write_c0_cause(cause); 925 926 /* 927 * If the current thread has the watch registers loaded, save 928 * their values and send SIGTRAP. Otherwise another thread 929 * left the registers set, clear them and continue. 930 */ 931 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { 932 mips_read_watch_registers(); 933 force_sig(SIGTRAP, current); 934 } else 935 mips_clear_watch_registers(); 936 } 937 938 asmlinkage void do_mcheck(struct pt_regs *regs) 939 { 940 const int field = 2 * sizeof(unsigned long); 941 int multi_match = regs->cp0_status & ST0_TS; 942 943 show_regs(regs); 944 945 if (multi_match) { 946 printk("Index : %0x\n", read_c0_index()); 947 printk("Pagemask: %0x\n", read_c0_pagemask()); 948 printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); 949 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 950 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 951 printk("\n"); 952 dump_tlb_all(); 953 } 954 955 show_code((unsigned int __user *) regs->cp0_epc); 956 957 /* 958 * Some chips may have other causes of machine check (e.g. SB1 959 * graduation timer) 960 */ 961 panic("Caught Machine Check exception - %scaused by multiple " 962 "matching entries in the TLB.", 963 (multi_match) ? "" : "not "); 964 } 965 966 asmlinkage void do_mt(struct pt_regs *regs) 967 { 968 int subcode; 969 970 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) 971 >> VPECONTROL_EXCPT_SHIFT; 972 switch (subcode) { 973 case 0: 974 printk(KERN_DEBUG "Thread Underflow\n"); 975 break; 976 case 1: 977 printk(KERN_DEBUG "Thread Overflow\n"); 978 break; 979 case 2: 980 printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); 981 break; 982 case 3: 983 printk(KERN_DEBUG "Gating Storage Exception\n"); 984 break; 985 case 4: 986 printk(KERN_DEBUG "YIELD Scheduler Exception\n"); 987 break; 988 case 5: 989 printk(KERN_DEBUG "Gating Storage Schedulier Exception\n"); 990 break; 991 default: 992 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", 993 subcode); 994 break; 995 } 996 die_if_kernel("MIPS MT Thread exception in kernel", regs); 997 998 force_sig(SIGILL, current); 999 } 1000 1001 1002 asmlinkage void do_dsp(struct pt_regs *regs) 1003 { 1004 if (cpu_has_dsp) 1005 panic("Unexpected DSP exception\n"); 1006 1007 force_sig(SIGILL, current); 1008 } 1009 1010 asmlinkage void do_reserved(struct pt_regs *regs) 1011 { 1012 /* 1013 * Game over - no way to handle this if it ever occurs. Most probably 1014 * caused by a new unknown cpu type or after another deadly 1015 * hard/software error. 1016 */ 1017 show_regs(regs); 1018 panic("Caught reserved exception %ld - should not happen.", 1019 (regs->cp0_cause & 0x7f) >> 2); 1020 } 1021 1022 static int __initdata l1parity = 1; 1023 static int __init nol1parity(char *s) 1024 { 1025 l1parity = 0; 1026 return 1; 1027 } 1028 __setup("nol1par", nol1parity); 1029 static int __initdata l2parity = 1; 1030 static int __init nol2parity(char *s) 1031 { 1032 l2parity = 0; 1033 return 1; 1034 } 1035 __setup("nol2par", nol2parity); 1036 1037 /* 1038 * Some MIPS CPUs can enable/disable for cache parity detection, but do 1039 * it different ways. 1040 */ 1041 static inline void parity_protection_init(void) 1042 { 1043 switch (current_cpu_type()) { 1044 case CPU_24K: 1045 case CPU_34K: 1046 case CPU_74K: 1047 case CPU_1004K: 1048 { 1049 #define ERRCTL_PE 0x80000000 1050 #define ERRCTL_L2P 0x00800000 1051 unsigned long errctl; 1052 unsigned int l1parity_present, l2parity_present; 1053 1054 errctl = read_c0_ecc(); 1055 errctl &= ~(ERRCTL_PE|ERRCTL_L2P); 1056 1057 /* probe L1 parity support */ 1058 write_c0_ecc(errctl | ERRCTL_PE); 1059 back_to_back_c0_hazard(); 1060 l1parity_present = (read_c0_ecc() & ERRCTL_PE); 1061 1062 /* probe L2 parity support */ 1063 write_c0_ecc(errctl|ERRCTL_L2P); 1064 back_to_back_c0_hazard(); 1065 l2parity_present = (read_c0_ecc() & ERRCTL_L2P); 1066 1067 if (l1parity_present && l2parity_present) { 1068 if (l1parity) 1069 errctl |= ERRCTL_PE; 1070 if (l1parity ^ l2parity) 1071 errctl |= ERRCTL_L2P; 1072 } else if (l1parity_present) { 1073 if (l1parity) 1074 errctl |= ERRCTL_PE; 1075 } else if (l2parity_present) { 1076 if (l2parity) 1077 errctl |= ERRCTL_L2P; 1078 } else { 1079 /* No parity available */ 1080 } 1081 1082 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); 1083 1084 write_c0_ecc(errctl); 1085 back_to_back_c0_hazard(); 1086 errctl = read_c0_ecc(); 1087 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); 1088 1089 if (l1parity_present) 1090 printk(KERN_INFO "Cache parity protection %sabled\n", 1091 (errctl & ERRCTL_PE) ? "en" : "dis"); 1092 1093 if (l2parity_present) { 1094 if (l1parity_present && l1parity) 1095 errctl ^= ERRCTL_L2P; 1096 printk(KERN_INFO "L2 cache parity protection %sabled\n", 1097 (errctl & ERRCTL_L2P) ? "en" : "dis"); 1098 } 1099 } 1100 break; 1101 1102 case CPU_5KC: 1103 write_c0_ecc(0x80000000); 1104 back_to_back_c0_hazard(); 1105 /* Set the PE bit (bit 31) in the c0_errctl register. */ 1106 printk(KERN_INFO "Cache parity protection %sabled\n", 1107 (read_c0_ecc() & 0x80000000) ? "en" : "dis"); 1108 break; 1109 case CPU_20KC: 1110 case CPU_25KF: 1111 /* Clear the DE bit (bit 16) in the c0_status register. */ 1112 printk(KERN_INFO "Enable cache parity protection for " 1113 "MIPS 20KC/25KF CPUs.\n"); 1114 clear_c0_status(ST0_DE); 1115 break; 1116 default: 1117 break; 1118 } 1119 } 1120 1121 asmlinkage void cache_parity_error(void) 1122 { 1123 const int field = 2 * sizeof(unsigned long); 1124 unsigned int reg_val; 1125 1126 /* For the moment, report the problem and hang. */ 1127 printk("Cache error exception:\n"); 1128 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); 1129 reg_val = read_c0_cacheerr(); 1130 printk("c0_cacheerr == %08x\n", reg_val); 1131 1132 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1133 reg_val & (1<<30) ? "secondary" : "primary", 1134 reg_val & (1<<31) ? "data" : "insn"); 1135 printk("Error bits: %s%s%s%s%s%s%s\n", 1136 reg_val & (1<<29) ? "ED " : "", 1137 reg_val & (1<<28) ? "ET " : "", 1138 reg_val & (1<<26) ? "EE " : "", 1139 reg_val & (1<<25) ? "EB " : "", 1140 reg_val & (1<<24) ? "EI " : "", 1141 reg_val & (1<<23) ? "E1 " : "", 1142 reg_val & (1<<22) ? "E0 " : ""); 1143 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1144 1145 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1146 if (reg_val & (1<<22)) 1147 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); 1148 1149 if (reg_val & (1<<23)) 1150 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); 1151 #endif 1152 1153 panic("Can't handle the cache error!"); 1154 } 1155 1156 /* 1157 * SDBBP EJTAG debug exception handler. 1158 * We skip the instruction and return to the next instruction. 1159 */ 1160 void ejtag_exception_handler(struct pt_regs *regs) 1161 { 1162 const int field = 2 * sizeof(unsigned long); 1163 unsigned long depc, old_epc; 1164 unsigned int debug; 1165 1166 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); 1167 depc = read_c0_depc(); 1168 debug = read_c0_debug(); 1169 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); 1170 if (debug & 0x80000000) { 1171 /* 1172 * In branch delay slot. 1173 * We cheat a little bit here and use EPC to calculate the 1174 * debug return address (DEPC). EPC is restored after the 1175 * calculation. 1176 */ 1177 old_epc = regs->cp0_epc; 1178 regs->cp0_epc = depc; 1179 __compute_return_epc(regs); 1180 depc = regs->cp0_epc; 1181 regs->cp0_epc = old_epc; 1182 } else 1183 depc += 4; 1184 write_c0_depc(depc); 1185 1186 #if 0 1187 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); 1188 write_c0_debug(debug | 0x100); 1189 #endif 1190 } 1191 1192 /* 1193 * NMI exception handler. 1194 */ 1195 NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs) 1196 { 1197 bust_spinlocks(1); 1198 printk("NMI taken!!!!\n"); 1199 die("NMI", regs); 1200 } 1201 1202 #define VECTORSPACING 0x100 /* for EI/VI mode */ 1203 1204 unsigned long ebase; 1205 unsigned long exception_handlers[32]; 1206 unsigned long vi_handlers[64]; 1207 1208 /* 1209 * As a side effect of the way this is implemented we're limited 1210 * to interrupt handlers in the address range from 1211 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ... 1212 */ 1213 void *set_except_vector(int n, void *addr) 1214 { 1215 unsigned long handler = (unsigned long) addr; 1216 unsigned long old_handler = exception_handlers[n]; 1217 1218 exception_handlers[n] = handler; 1219 if (n == 0 && cpu_has_divec) { 1220 *(u32 *)(ebase + 0x200) = 0x08000000 | 1221 (0x03ffffff & (handler >> 2)); 1222 local_flush_icache_range(ebase + 0x200, ebase + 0x204); 1223 } 1224 return (void *)old_handler; 1225 } 1226 1227 static asmlinkage void do_default_vi(void) 1228 { 1229 show_regs(get_irq_regs()); 1230 panic("Caught unexpected vectored interrupt."); 1231 } 1232 1233 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) 1234 { 1235 unsigned long handler; 1236 unsigned long old_handler = vi_handlers[n]; 1237 int srssets = current_cpu_data.srsets; 1238 u32 *w; 1239 unsigned char *b; 1240 1241 if (!cpu_has_veic && !cpu_has_vint) 1242 BUG(); 1243 1244 if (addr == NULL) { 1245 handler = (unsigned long) do_default_vi; 1246 srs = 0; 1247 } else 1248 handler = (unsigned long) addr; 1249 vi_handlers[n] = (unsigned long) addr; 1250 1251 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); 1252 1253 if (srs >= srssets) 1254 panic("Shadow register set %d not supported", srs); 1255 1256 if (cpu_has_veic) { 1257 if (board_bind_eic_interrupt) 1258 board_bind_eic_interrupt(n, srs); 1259 } else if (cpu_has_vint) { 1260 /* SRSMap is only defined if shadow sets are implemented */ 1261 if (srssets > 1) 1262 change_c0_srsmap(0xf << n*4, srs << n*4); 1263 } 1264 1265 if (srs == 0) { 1266 /* 1267 * If no shadow set is selected then use the default handler 1268 * that does normal register saving and a standard interrupt exit 1269 */ 1270 1271 extern char except_vec_vi, except_vec_vi_lui; 1272 extern char except_vec_vi_ori, except_vec_vi_end; 1273 extern char rollback_except_vec_vi; 1274 char *vec_start = (cpu_wait == r4k_wait) ? 1275 &rollback_except_vec_vi : &except_vec_vi; 1276 #ifdef CONFIG_MIPS_MT_SMTC 1277 /* 1278 * We need to provide the SMTC vectored interrupt handler 1279 * not only with the address of the handler, but with the 1280 * Status.IM bit to be masked before going there. 1281 */ 1282 extern char except_vec_vi_mori; 1283 const int mori_offset = &except_vec_vi_mori - vec_start; 1284 #endif /* CONFIG_MIPS_MT_SMTC */ 1285 const int handler_len = &except_vec_vi_end - vec_start; 1286 const int lui_offset = &except_vec_vi_lui - vec_start; 1287 const int ori_offset = &except_vec_vi_ori - vec_start; 1288 1289 if (handler_len > VECTORSPACING) { 1290 /* 1291 * Sigh... panicing won't help as the console 1292 * is probably not configured :( 1293 */ 1294 panic("VECTORSPACING too small"); 1295 } 1296 1297 memcpy(b, vec_start, handler_len); 1298 #ifdef CONFIG_MIPS_MT_SMTC 1299 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1300 1301 w = (u32 *)(b + mori_offset); 1302 *w = (*w & 0xffff0000) | (0x100 << n); 1303 #endif /* CONFIG_MIPS_MT_SMTC */ 1304 w = (u32 *)(b + lui_offset); 1305 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1306 w = (u32 *)(b + ori_offset); 1307 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); 1308 local_flush_icache_range((unsigned long)b, 1309 (unsigned long)(b+handler_len)); 1310 } 1311 else { 1312 /* 1313 * In other cases jump directly to the interrupt handler 1314 * 1315 * It is the handlers responsibility to save registers if required 1316 * (eg hi/lo) and return from the exception using "eret" 1317 */ 1318 w = (u32 *)b; 1319 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ 1320 *w = 0; 1321 local_flush_icache_range((unsigned long)b, 1322 (unsigned long)(b+8)); 1323 } 1324 1325 return (void *)old_handler; 1326 } 1327 1328 void *set_vi_handler(int n, vi_handler_t addr) 1329 { 1330 return set_vi_srs_handler(n, addr, 0); 1331 } 1332 1333 /* 1334 * This is used by native signal handling 1335 */ 1336 asmlinkage int (*save_fp_context)(struct sigcontext __user *sc); 1337 asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc); 1338 1339 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 1340 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 1341 1342 extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); 1343 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); 1344 1345 #ifdef CONFIG_SMP 1346 static int smp_save_fp_context(struct sigcontext __user *sc) 1347 { 1348 return raw_cpu_has_fpu 1349 ? _save_fp_context(sc) 1350 : fpu_emulator_save_context(sc); 1351 } 1352 1353 static int smp_restore_fp_context(struct sigcontext __user *sc) 1354 { 1355 return raw_cpu_has_fpu 1356 ? _restore_fp_context(sc) 1357 : fpu_emulator_restore_context(sc); 1358 } 1359 #endif 1360 1361 static inline void signal_init(void) 1362 { 1363 #ifdef CONFIG_SMP 1364 /* For now just do the cpu_has_fpu check when the functions are invoked */ 1365 save_fp_context = smp_save_fp_context; 1366 restore_fp_context = smp_restore_fp_context; 1367 #else 1368 if (cpu_has_fpu) { 1369 save_fp_context = _save_fp_context; 1370 restore_fp_context = _restore_fp_context; 1371 } else { 1372 save_fp_context = fpu_emulator_save_context; 1373 restore_fp_context = fpu_emulator_restore_context; 1374 } 1375 #endif 1376 } 1377 1378 #ifdef CONFIG_MIPS32_COMPAT 1379 1380 /* 1381 * This is used by 32-bit signal stuff on the 64-bit kernel 1382 */ 1383 asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc); 1384 asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc); 1385 1386 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 1387 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 1388 1389 extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); 1390 extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); 1391 1392 static inline void signal32_init(void) 1393 { 1394 if (cpu_has_fpu) { 1395 save_fp_context32 = _save_fp_context32; 1396 restore_fp_context32 = _restore_fp_context32; 1397 } else { 1398 save_fp_context32 = fpu_emulator_save_context32; 1399 restore_fp_context32 = fpu_emulator_restore_context32; 1400 } 1401 } 1402 #endif 1403 1404 extern void cpu_cache_init(void); 1405 extern void tlb_init(void); 1406 extern void flush_tlb_handlers(void); 1407 1408 /* 1409 * Timer interrupt 1410 */ 1411 int cp0_compare_irq; 1412 1413 /* 1414 * Performance counter IRQ or -1 if shared with timer 1415 */ 1416 int cp0_perfcount_irq; 1417 EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 1418 1419 static int __cpuinitdata noulri; 1420 1421 static int __init ulri_disable(char *s) 1422 { 1423 pr_info("Disabling ulri\n"); 1424 noulri = 1; 1425 1426 return 1; 1427 } 1428 __setup("noulri", ulri_disable); 1429 1430 void __cpuinit per_cpu_trap_init(void) 1431 { 1432 unsigned int cpu = smp_processor_id(); 1433 unsigned int status_set = ST0_CU0; 1434 #ifdef CONFIG_MIPS_MT_SMTC 1435 int secondaryTC = 0; 1436 int bootTC = (cpu == 0); 1437 1438 /* 1439 * Only do per_cpu_trap_init() for first TC of Each VPE. 1440 * Note that this hack assumes that the SMTC init code 1441 * assigns TCs consecutively and in ascending order. 1442 */ 1443 1444 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && 1445 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) 1446 secondaryTC = 1; 1447 #endif /* CONFIG_MIPS_MT_SMTC */ 1448 1449 /* 1450 * Disable coprocessors and select 32-bit or 64-bit addressing 1451 * and the 16/32 or 32/32 FPR register model. Reset the BEV 1452 * flag that some firmware may have left set and the TS bit (for 1453 * IP27). Set XX for ISA IV code to work. 1454 */ 1455 #ifdef CONFIG_64BIT 1456 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; 1457 #endif 1458 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) 1459 status_set |= ST0_XX; 1460 if (cpu_has_dsp) 1461 status_set |= ST0_MX; 1462 1463 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 1464 status_set); 1465 1466 if (cpu_has_mips_r2) { 1467 unsigned int enable = 0x0000000f; 1468 1469 if (!noulri && cpu_has_userlocal) 1470 enable |= (1 << 29); 1471 1472 write_c0_hwrena(enable); 1473 } 1474 1475 #ifdef CONFIG_MIPS_MT_SMTC 1476 if (!secondaryTC) { 1477 #endif /* CONFIG_MIPS_MT_SMTC */ 1478 1479 if (cpu_has_veic || cpu_has_vint) { 1480 write_c0_ebase(ebase); 1481 /* Setting vector spacing enables EI/VI mode */ 1482 change_c0_intctl(0x3e0, VECTORSPACING); 1483 } 1484 if (cpu_has_divec) { 1485 if (cpu_has_mipsmt) { 1486 unsigned int vpflags = dvpe(); 1487 set_c0_cause(CAUSEF_IV); 1488 evpe(vpflags); 1489 } else 1490 set_c0_cause(CAUSEF_IV); 1491 } 1492 1493 /* 1494 * Before R2 both interrupt numbers were fixed to 7, so on R2 only: 1495 * 1496 * o read IntCtl.IPTI to determine the timer interrupt 1497 * o read IntCtl.IPPCI to determine the performance counter interrupt 1498 */ 1499 if (cpu_has_mips_r2) { 1500 cp0_compare_irq = (read_c0_intctl() >> 29) & 7; 1501 cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7; 1502 if (cp0_perfcount_irq == cp0_compare_irq) 1503 cp0_perfcount_irq = -1; 1504 } else { 1505 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; 1506 cp0_perfcount_irq = -1; 1507 } 1508 1509 #ifdef CONFIG_MIPS_MT_SMTC 1510 } 1511 #endif /* CONFIG_MIPS_MT_SMTC */ 1512 1513 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 1514 TLBMISS_HANDLER_SETUP(); 1515 1516 atomic_inc(&init_mm.mm_count); 1517 current->active_mm = &init_mm; 1518 BUG_ON(current->mm); 1519 enter_lazy_tlb(&init_mm, current); 1520 1521 #ifdef CONFIG_MIPS_MT_SMTC 1522 if (bootTC) { 1523 #endif /* CONFIG_MIPS_MT_SMTC */ 1524 cpu_cache_init(); 1525 tlb_init(); 1526 #ifdef CONFIG_MIPS_MT_SMTC 1527 } else if (!secondaryTC) { 1528 /* 1529 * First TC in non-boot VPE must do subset of tlb_init() 1530 * for MMU countrol registers. 1531 */ 1532 write_c0_pagemask(PM_DEFAULT_MASK); 1533 write_c0_wired(0); 1534 } 1535 #endif /* CONFIG_MIPS_MT_SMTC */ 1536 } 1537 1538 /* Install CPU exception handler */ 1539 void __init set_handler(unsigned long offset, void *addr, unsigned long size) 1540 { 1541 memcpy((void *)(ebase + offset), addr, size); 1542 local_flush_icache_range(ebase + offset, ebase + offset + size); 1543 } 1544 1545 static char panic_null_cerr[] __cpuinitdata = 1546 "Trying to set NULL cache error exception handler"; 1547 1548 /* Install uncached CPU exception handler */ 1549 void __cpuinit set_uncached_handler(unsigned long offset, void *addr, 1550 unsigned long size) 1551 { 1552 #ifdef CONFIG_32BIT 1553 unsigned long uncached_ebase = KSEG1ADDR(ebase); 1554 #endif 1555 #ifdef CONFIG_64BIT 1556 unsigned long uncached_ebase = TO_UNCAC(ebase); 1557 #endif 1558 1559 if (!addr) 1560 panic(panic_null_cerr); 1561 1562 memcpy((void *)(uncached_ebase + offset), addr, size); 1563 } 1564 1565 static int __initdata rdhwr_noopt; 1566 static int __init set_rdhwr_noopt(char *str) 1567 { 1568 rdhwr_noopt = 1; 1569 return 1; 1570 } 1571 1572 __setup("rdhwr_noopt", set_rdhwr_noopt); 1573 1574 void __init trap_init(void) 1575 { 1576 extern char except_vec3_generic, except_vec3_r4000; 1577 extern char except_vec4; 1578 unsigned long i; 1579 int rollback; 1580 1581 check_wait(); 1582 rollback = (cpu_wait == r4k_wait); 1583 1584 #if defined(CONFIG_KGDB) 1585 if (kgdb_early_setup) 1586 return; /* Already done */ 1587 #endif 1588 1589 if (cpu_has_veic || cpu_has_vint) 1590 ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64); 1591 else 1592 ebase = CAC_BASE; 1593 1594 per_cpu_trap_init(); 1595 1596 /* 1597 * Copy the generic exception handlers to their final destination. 1598 * This will be overriden later as suitable for a particular 1599 * configuration. 1600 */ 1601 set_handler(0x180, &except_vec3_generic, 0x80); 1602 1603 /* 1604 * Setup default vectors 1605 */ 1606 for (i = 0; i <= 31; i++) 1607 set_except_vector(i, handle_reserved); 1608 1609 /* 1610 * Copy the EJTAG debug exception vector handler code to it's final 1611 * destination. 1612 */ 1613 if (cpu_has_ejtag && board_ejtag_handler_setup) 1614 board_ejtag_handler_setup(); 1615 1616 /* 1617 * Only some CPUs have the watch exceptions. 1618 */ 1619 if (cpu_has_watch) 1620 set_except_vector(23, handle_watch); 1621 1622 /* 1623 * Initialise interrupt handlers 1624 */ 1625 if (cpu_has_veic || cpu_has_vint) { 1626 int nvec = cpu_has_veic ? 64 : 8; 1627 for (i = 0; i < nvec; i++) 1628 set_vi_handler(i, NULL); 1629 } 1630 else if (cpu_has_divec) 1631 set_handler(0x200, &except_vec4, 0x8); 1632 1633 /* 1634 * Some CPUs can enable/disable for cache parity detection, but does 1635 * it different ways. 1636 */ 1637 parity_protection_init(); 1638 1639 /* 1640 * The Data Bus Errors / Instruction Bus Errors are signaled 1641 * by external hardware. Therefore these two exceptions 1642 * may have board specific handlers. 1643 */ 1644 if (board_be_init) 1645 board_be_init(); 1646 1647 set_except_vector(0, rollback ? rollback_handle_int : handle_int); 1648 set_except_vector(1, handle_tlbm); 1649 set_except_vector(2, handle_tlbl); 1650 set_except_vector(3, handle_tlbs); 1651 1652 set_except_vector(4, handle_adel); 1653 set_except_vector(5, handle_ades); 1654 1655 set_except_vector(6, handle_ibe); 1656 set_except_vector(7, handle_dbe); 1657 1658 set_except_vector(8, handle_sys); 1659 set_except_vector(9, handle_bp); 1660 set_except_vector(10, rdhwr_noopt ? handle_ri : 1661 (cpu_has_vtag_icache ? 1662 handle_ri_rdhwr_vivt : handle_ri_rdhwr)); 1663 set_except_vector(11, handle_cpu); 1664 set_except_vector(12, handle_ov); 1665 set_except_vector(13, handle_tr); 1666 1667 if (current_cpu_type() == CPU_R6000 || 1668 current_cpu_type() == CPU_R6000A) { 1669 /* 1670 * The R6000 is the only R-series CPU that features a machine 1671 * check exception (similar to the R4000 cache error) and 1672 * unaligned ldc1/sdc1 exception. The handlers have not been 1673 * written yet. Well, anyway there is no R6000 machine on the 1674 * current list of targets for Linux/MIPS. 1675 * (Duh, crap, there is someone with a triple R6k machine) 1676 */ 1677 //set_except_vector(14, handle_mc); 1678 //set_except_vector(15, handle_ndc); 1679 } 1680 1681 1682 if (board_nmi_handler_setup) 1683 board_nmi_handler_setup(); 1684 1685 if (cpu_has_fpu && !cpu_has_nofpuex) 1686 set_except_vector(15, handle_fpe); 1687 1688 set_except_vector(22, handle_mdmx); 1689 1690 if (cpu_has_mcheck) 1691 set_except_vector(24, handle_mcheck); 1692 1693 if (cpu_has_mipsmt) 1694 set_except_vector(25, handle_mt); 1695 1696 set_except_vector(26, handle_dsp); 1697 1698 if (cpu_has_vce) 1699 /* Special exception: R4[04]00 uses also the divec space. */ 1700 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100); 1701 else if (cpu_has_4kex) 1702 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); 1703 else 1704 memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80); 1705 1706 signal_init(); 1707 #ifdef CONFIG_MIPS32_COMPAT 1708 signal32_init(); 1709 #endif 1710 1711 local_flush_icache_range(ebase, ebase + 0x400); 1712 flush_tlb_handlers(); 1713 1714 sort_extable(__start___dbe_table, __stop___dbe_table); 1715 } 1716