/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>

extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx, int has_fpu);

void (*board_watchpoint_handler)(struct pt_regs *regs);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);


static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);

static void show_code(unsigned int __user *pc)
{
	long i;

	printk("\nCode:");

	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
	}
}

void show_regs(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx ", field, regs->cp0_epc);
	print_symbol("%s ", regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx ", field, regs->regs[31]);
	print_symbol("%s\n", regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

void show_registers(struct pt_regs *regs)
{
	show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char * str, struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}

extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}

/*
 * ll/sc emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;
	int signal = 0;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}
	if (get_user(value, vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	compute_return_epc(regs);

	regs->regs[(opcode & RT) >> 16] = value;

	return;

sig:
	force_sig(signal, current);
}

static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;
	int signal = 0;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		compute_return_epc(regs);
		regs->regs[reg] = 0;
		preempt_enable();
		return;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	compute_return_epc(regs);
	regs->regs[reg] = 1;

	return;

sig:
	force_sig(signal, current);
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
	unsigned int opcode;

	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;			/* Strange things going on ... */

out_sigsegv:
	force_sig(SIGSEGV, current);
	return -EFAULT;
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
 */
static inline int simulate_rdhwr(struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(current);
	unsigned int opcode;

	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	if (unlikely(compute_return_epc(regs)))
		return -EFAULT;

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
		switch (rd) {
		case 29:
			regs->regs[rt] = ti->tp_value;
			return 0;
		default:
			return -EFAULT;
		}
	}

	/* Not ours.  */
	return -EFAULT;

out_sigsegv:
	force_sig(SIGSEGV, current);
	return -EFAULT;
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	siginfo_t info;

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved FP context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/*
	 * There is an ancient bug in MIPS assemblers: the break code starts
	 * at bit 16 instead of bit 6 in the opcode.  Gas is bug-compatible,
	 * but not always, grrr...  We handle both cases with a simple
	 * heuristic.  --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		die_if_kernel("Break instruction in kernel code", regs);
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die("Kernel bug detected", regs);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig(SIGTRAP, current);
	}
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness???  --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		die_if_kernel("Trap instruction in kernel code", regs);
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die("Kernel bug detected", regs);
		break;
	default:
		die_if_kernel("Trap instruction in kernel code", regs);
		force_sig(SIGTRAP, current);
	}
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	if (!simulate_rdhwr(regs))
		return;

	force_sig(SIGILL, current);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			cpus_and(tmask, current->thread.user_cpus_allowed,
				 mt_fpu_cpumask);
			set_cpus_allowed(current, tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (!cpu_has_llsc)
			if (!simulate_llsc(regs))
				return;

		if (!simulate_rdhwr(regs))
			return;

		break;

	case 1:
		if (used_math())	/* Using the FPU again.  */
			own_fpu(1);
		else {			/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			int sig;
			sig = fpu_emulator_cop1Handler(regs,
						&current->thread.fpu, 0);
			if (sig)
				force_sig(sig, current);
			else
				mt_ase_fp_affinity();
		}

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	if (board_watchpoint_handler) {
		(*board_watchpoint_handler)(regs);
		return;
	}

	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index   : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	die("NMI", regs);
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(u32 *)(ebase + 0x200) = 0x08000000 |
					  (0x03ffffff & (handler >> 2));
		flush_icache_range(ebase + 0x200, ebase + 0x204);
	}
	return (void *)old_handler;
}

#ifdef CONFIG_CPU_MIPSR2_SRS
/*
 * MIPSR2 shadow register set allocation
 * FIXME: SMP...
 */

static struct shadow_registers {
	/*
	 * Number of shadow register sets supported
	 */
	unsigned long sr_supported;
	/*
	 * Bitmap of allocated shadow registers
	 */
	unsigned long sr_allocated;
} shadow_registers;

static void mips_srs_init(void)
{
	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
	printk(KERN_INFO "%ld MIPSR2 register sets available\n",
	       shadow_registers.sr_supported);
	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
}

int mips_srs_max(void)
{
	return shadow_registers.sr_supported;
}

int mips_srs_alloc(void)
{
	struct shadow_registers *sr = &shadow_registers;
	int set;

again:
	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
	if (set >= sr->sr_supported)
		return -1;

	if (test_and_set_bit(set, &sr->sr_allocated))
		goto again;

	return set;
}

void mips_srs_free(int set)
{
	struct shadow_registers *sr = &shadow_registers;

	clear_bit(set, &sr->sr_allocated);
}

static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	u32 *w;
	unsigned char *b;

	if (!cpu_has_veic && !cpu_has_vint)
		BUG();

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= mips_srs_max())
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (mips_srs_max() > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - &except_vec_vi;
		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handler's responsibility to save registers if required
		 * (e.g. hi/lo) and return from the exception using "eret"
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
		*w = 0;
		flush_icache_range((unsigned long)b, (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

#else

static inline void mips_srs_init(void)
{
}

#endif /* CONFIG_CPU_MIPSR2_SRS */

/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif
}

#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of Each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_dsp)
		set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
	if (cpu_has_mips_r2) {
		unsigned int enable = 0x0000000f;

		if (cpu_has_userlocal)
			enable |= (1 << 29);

		write_c0_hwrena(enable);
	}
#endif

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	if (cpu_has_veic || cpu_has_vint) {
		write_c0_ebase(ebase);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2) {
		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_perfcount_irq = -1;
	}

#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	} else if (!secondaryTC) {
		/*
		 * First TC in non-boot VPE must do subset of tlb_init()
		 * for MMU control registers.
		 */
		write_c0_pagemask(PM_DEFAULT_MASK);
		write_c0_wired(0);
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}

/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __initdata =
	"Trying to set NULL cache error exception handler";

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
	unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;

	if (cpu_has_veic || cpu_has_vint)
		ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
	else
		ebase = CAC_BASE;

	mips_srs_init();

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}


	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();
}