xref: /linux/arch/mips/kernel/traps.c (revision 10cc3529072d5415fb040018a8a99aa7a60190b6)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7  * Copyright (C) 1995, 1996 Paul M. Antoine
8  * Copyright (C) 1998 Ulf Carlsson
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12  * Copyright (C) 2002, 2003, 2004, 2005  Maciej W. Rozycki
13  */
14 #include <linux/bug.h>
15 #include <linux/init.h>
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/kallsyms.h>
22 #include <linux/bootmem.h>
23 #include <linux/interrupt.h>
24 
25 #include <asm/bootinfo.h>
26 #include <asm/branch.h>
27 #include <asm/break.h>
28 #include <asm/cpu.h>
29 #include <asm/dsp.h>
30 #include <asm/fpu.h>
31 #include <asm/mipsregs.h>
32 #include <asm/mipsmtregs.h>
33 #include <asm/module.h>
34 #include <asm/pgtable.h>
35 #include <asm/ptrace.h>
36 #include <asm/sections.h>
37 #include <asm/system.h>
38 #include <asm/tlbdebug.h>
39 #include <asm/traps.h>
40 #include <asm/uaccess.h>
41 #include <asm/mmu_context.h>
42 #include <asm/types.h>
43 #include <asm/stacktrace.h>
44 
45 extern asmlinkage void handle_int(void);
46 extern asmlinkage void handle_tlbm(void);
47 extern asmlinkage void handle_tlbl(void);
48 extern asmlinkage void handle_tlbs(void);
49 extern asmlinkage void handle_adel(void);
50 extern asmlinkage void handle_ades(void);
51 extern asmlinkage void handle_ibe(void);
52 extern asmlinkage void handle_dbe(void);
53 extern asmlinkage void handle_sys(void);
54 extern asmlinkage void handle_bp(void);
55 extern asmlinkage void handle_ri(void);
56 extern asmlinkage void handle_ri_rdhwr_vivt(void);
57 extern asmlinkage void handle_ri_rdhwr(void);
58 extern asmlinkage void handle_cpu(void);
59 extern asmlinkage void handle_ov(void);
60 extern asmlinkage void handle_tr(void);
61 extern asmlinkage void handle_fpe(void);
62 extern asmlinkage void handle_mdmx(void);
63 extern asmlinkage void handle_watch(void);
64 extern asmlinkage void handle_mt(void);
65 extern asmlinkage void handle_dsp(void);
66 extern asmlinkage void handle_mcheck(void);
67 extern asmlinkage void handle_reserved(void);
68 
69 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
70 	struct mips_fpu_struct *ctx, int has_fpu);
71 
72 void (*board_watchpoint_handler)(struct pt_regs *regs);
73 void (*board_be_init)(void);
74 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
75 void (*board_nmi_handler_setup)(void);
76 void (*board_ejtag_handler_setup)(void);
77 void (*board_bind_eic_interrupt)(int irq, int regset);
78 
79 
80 static void show_raw_backtrace(unsigned long reg29)
81 {
82 	unsigned long *sp = (unsigned long *)reg29;
83 	unsigned long addr;
84 
85 	printk("Call Trace:");
86 #ifdef CONFIG_KALLSYMS
87 	printk("\n");
88 #endif
89 	while (!kstack_end(sp)) {
90 		addr = *sp++;
91 		if (__kernel_text_address(addr))
92 			print_ip_sym(addr);
93 	}
94 	printk("\n");
95 }
96 
97 #ifdef CONFIG_KALLSYMS
98 int raw_show_trace;
99 static int __init set_raw_show_trace(char *str)
100 {
101 	raw_show_trace = 1;
102 	return 1;
103 }
104 __setup("raw_show_trace", set_raw_show_trace);
105 #endif
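
/*
 * Usage note (illustrative): booting with "raw_show_trace" on the kernel
 * command line makes show_backtrace() below skip the frame unwinder and
 * simply dump every kernel text address found on the stack, which can help
 * when the unwinder itself is suspected of misbehaving.
 */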
106 
107 static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
108 {
109 	unsigned long sp = regs->regs[29];
110 	unsigned long ra = regs->regs[31];
111 	unsigned long pc = regs->cp0_epc;
112 
113 	if (raw_show_trace || !__kernel_text_address(pc)) {
114 		show_raw_backtrace(sp);
115 		return;
116 	}
117 	printk("Call Trace:\n");
118 	do {
119 		print_ip_sym(pc);
120 		pc = unwind_stack(task, &sp, pc, &ra);
121 	} while (pc);
122 	printk("\n");
123 }
124 
125 /*
126  * This routine abuses get_user()/put_user() to reference pointers
127  * with at least a bit of error checking ...
128  */
129 static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
130 {
131 	const int field = 2 * sizeof(unsigned long);
132 	long stackdata;
133 	int i;
134 	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
135 
136 	printk("Stack :");
137 	i = 0;
138 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
139 		if (i && ((i % (64 / field)) == 0))
140 			printk("\n       ");
141 		if (i > 39) {
142 			printk(" ...");
143 			break;
144 		}
145 
146 		if (__get_user(stackdata, sp++)) {
147 			printk(" (Bad stack address)");
148 			break;
149 		}
150 
151 		printk(" %0*lx", field, stackdata);
152 		i++;
153 	}
154 	printk("\n");
155 	show_backtrace(task, regs);
156 }
157 
158 void show_stack(struct task_struct *task, unsigned long *sp)
159 {
160 	struct pt_regs regs;
161 	if (sp) {
162 		regs.regs[29] = (unsigned long)sp;
163 		regs.regs[31] = 0;
164 		regs.cp0_epc = 0;
165 	} else {
166 		if (task && task != current) {
167 			regs.regs[29] = task->thread.reg29;
168 			regs.regs[31] = 0;
169 			regs.cp0_epc = task->thread.reg31;
170 		} else {
171 			prepare_frametrace(&regs);
172 		}
173 	}
174 	show_stacktrace(task, &regs);
175 }
176 
177 /*
178  * The architecture-independent dump_stack generator
179  */
180 void dump_stack(void)
181 {
182 	struct pt_regs regs;
183 
184 	prepare_frametrace(&regs);
185 	show_backtrace(current, &regs);
186 }
187 
188 EXPORT_SYMBOL(dump_stack);
189 
190 static void show_code(unsigned int __user *pc)
191 {
192 	long i;
193 
194 	printk("\nCode:");
195 
196 	for(i = -3 ; i < 6 ; i++) {
197 		unsigned int insn;
198 		if (__get_user(insn, pc + i)) {
199 			printk(" (Bad address in epc)\n");
200 			break;
201 		}
202 		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
203 	}
204 }
205 
206 void show_regs(struct pt_regs *regs)
207 {
208 	const int field = 2 * sizeof(unsigned long);
209 	unsigned int cause = regs->cp0_cause;
210 	int i;
211 
212 	printk("Cpu %d\n", smp_processor_id());
213 
214 	/*
215 	 * Saved main processor registers
216 	 */
217 	for (i = 0; i < 32; ) {
218 		if ((i % 4) == 0)
219 			printk("$%2d   :", i);
220 		if (i == 0)
221 			printk(" %0*lx", field, 0UL);
222 		else if (i == 26 || i == 27)
223 			printk(" %*s", field, "");
224 		else
225 			printk(" %0*lx", field, regs->regs[i]);
226 
227 		i++;
228 		if ((i % 4) == 0)
229 			printk("\n");
230 	}
231 
232 #ifdef CONFIG_CPU_HAS_SMARTMIPS
233 	printk("Acx    : %0*lx\n", field, regs->acx);
234 #endif
235 	printk("Hi    : %0*lx\n", field, regs->hi);
236 	printk("Lo    : %0*lx\n", field, regs->lo);
237 
238 	/*
239 	 * Saved cp0 registers
240 	 */
241 	printk("epc   : %0*lx ", field, regs->cp0_epc);
242 	print_symbol("%s ", regs->cp0_epc);
243 	printk("    %s\n", print_tainted());
244 	printk("ra    : %0*lx ", field, regs->regs[31]);
245 	print_symbol("%s\n", regs->regs[31]);
246 
247 	printk("Status: %08x    ", (uint32_t) regs->cp0_status);
248 
249 	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
250 		if (regs->cp0_status & ST0_KUO)
251 			printk("KUo ");
252 		if (regs->cp0_status & ST0_IEO)
253 			printk("IEo ");
254 		if (regs->cp0_status & ST0_KUP)
255 			printk("KUp ");
256 		if (regs->cp0_status & ST0_IEP)
257 			printk("IEp ");
258 		if (regs->cp0_status & ST0_KUC)
259 			printk("KUc ");
260 		if (regs->cp0_status & ST0_IEC)
261 			printk("IEc ");
262 	} else {
263 		if (regs->cp0_status & ST0_KX)
264 			printk("KX ");
265 		if (regs->cp0_status & ST0_SX)
266 			printk("SX ");
267 		if (regs->cp0_status & ST0_UX)
268 			printk("UX ");
269 		switch (regs->cp0_status & ST0_KSU) {
270 		case KSU_USER:
271 			printk("USER ");
272 			break;
273 		case KSU_SUPERVISOR:
274 			printk("SUPERVISOR ");
275 			break;
276 		case KSU_KERNEL:
277 			printk("KERNEL ");
278 			break;
279 		default:
280 			printk("BAD_MODE ");
281 			break;
282 		}
283 		if (regs->cp0_status & ST0_ERL)
284 			printk("ERL ");
285 		if (regs->cp0_status & ST0_EXL)
286 			printk("EXL ");
287 		if (regs->cp0_status & ST0_IE)
288 			printk("IE ");
289 	}
290 	printk("\n");
291 
292 	printk("Cause : %08x\n", cause);
293 
294 	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
295 	if (1 <= cause && cause <= 5)
296 		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
297 
298 	printk("PrId  : %08x\n", read_c0_prid());
299 }
300 
301 void show_registers(struct pt_regs *regs)
302 {
303 	show_regs(regs);
304 	print_modules();
305 	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
306 	        current->comm, current->pid, current_thread_info(), current);
307 	show_stacktrace(current, regs);
308 	show_code((unsigned int __user *) regs->cp0_epc);
309 	printk("\n");
310 }
311 
312 static DEFINE_SPINLOCK(die_lock);
313 
314 void __noreturn die(const char * str, struct pt_regs * regs)
315 {
316 	static int die_counter;
317 #ifdef CONFIG_MIPS_MT_SMTC
318 	unsigned long dvpret = dvpe();
319 #endif /* CONFIG_MIPS_MT_SMTC */
320 
321 	console_verbose();
322 	spin_lock_irq(&die_lock);
323 	bust_spinlocks(1);
324 #ifdef CONFIG_MIPS_MT_SMTC
325 	mips_mt_regdump(dvpret);
326 #endif /* CONFIG_MIPS_MT_SMTC */
327 	printk("%s[#%d]:\n", str, ++die_counter);
328 	show_registers(regs);
329 	add_taint(TAINT_DIE);
330 	spin_unlock_irq(&die_lock);
331 
332 	if (in_interrupt())
333 		panic("Fatal exception in interrupt");
334 
335 	if (panic_on_oops) {
336 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
337 		ssleep(5);
338 		panic("Fatal exception");
339 	}
340 
341 	do_exit(SIGSEGV);
342 }
343 
344 extern const struct exception_table_entry __start___dbe_table[];
345 extern const struct exception_table_entry __stop___dbe_table[];
346 
347 __asm__(
348 "	.section	__dbe_table, \"a\"\n"
349 "	.previous			\n");
350 
351 /* Given an address, look for it in the exception tables. */
352 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
353 {
354 	const struct exception_table_entry *e;
355 
356 	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
357 	if (!e)
358 		e = search_module_dbetables(addr);
359 	return e;
360 }
361 
362 asmlinkage void do_be(struct pt_regs *regs)
363 {
364 	const int field = 2 * sizeof(unsigned long);
365 	const struct exception_table_entry *fixup = NULL;
366 	int data = regs->cp0_cause & 4;
367 	int action = MIPS_BE_FATAL;
368 
369 	/* XXX For now.  Fixme, this searches the wrong table ...  */
370 	if (data && !user_mode(regs))
371 		fixup = search_dbe_tables(exception_epc(regs));
372 
373 	if (fixup)
374 		action = MIPS_BE_FIXUP;
375 
376 	if (board_be_handler)
377 		action = board_be_handler(regs, fixup != NULL);
378 
379 	switch (action) {
380 	case MIPS_BE_DISCARD:
381 		return;
382 	case MIPS_BE_FIXUP:
383 		if (fixup) {
384 			regs->cp0_epc = fixup->nextinsn;
385 			return;
386 		}
387 		break;
388 	default:
389 		break;
390 	}
391 
392 	/*
393 	 * Assume it would be too dangerous to continue ...
394 	 */
395 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
396 	       data ? "Data" : "Instruction",
397 	       field, regs->cp0_epc, field, regs->regs[31]);
398 	die_if_kernel("Oops", regs);
399 	force_sig(SIGBUS, current);
400 }
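
/*
 * A minimal sketch (hypothetical, not wired to any real board) of how
 * platform code can hook into do_be() above: board_be_handler receives the
 * trapping registers plus a flag saying whether a __dbe_table fixup exists,
 * and returns MIPS_BE_DISCARD, MIPS_BE_FIXUP or MIPS_BE_FATAL.
 */
#if 0
static int example_board_be_handler(struct pt_regs *regs, int is_fixup)
{
	/* A probing driver expected this bus error: let the fixup run. */
	if (is_fixup)
		return MIPS_BE_FIXUP;

	/* Otherwise the error is fatal and do_be() will oops/signal. */
	return MIPS_BE_FATAL;
}

static void example_board_be_init(void)
{
	board_be_handler = example_board_be_handler;
}
#endif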
401 
402 /*
403  * ll/sc emulation
404  */
405 
406 #define OPCODE 0xfc000000
407 #define BASE   0x03e00000
408 #define RT     0x001f0000
409 #define OFFSET 0x0000ffff
410 #define LL     0xc0000000
411 #define SC     0xe0000000
412 #define SPEC3  0x7c000000
413 #define RD     0x0000f800
414 #define FUNC   0x0000003f
415 #define RDHWR  0x0000003b
416 
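/*
 * Decoding sketch (illustrative only): the masks above pick apart an ll/sc
 * encoding such as "ll $2, 8($3)" into its base register, target register
 * and signed 16-bit offset, just as simulate_ll()/simulate_sc() below do.
 */
#if 0
static void example_decode_llsc(unsigned int opcode)
{
	unsigned int base = (opcode & BASE) >> 21;	/* address base register */
	unsigned int rt = (opcode & RT) >> 16;		/* data register */
	long offset = (s16)(opcode & OFFSET);		/* sign-extended immediate */

	if ((opcode & OPCODE) == LL)
		printk(KERN_DEBUG "ll $%u, %ld($%u)\n", rt, offset, base);
	else if ((opcode & OPCODE) == SC)
		printk(KERN_DEBUG "sc $%u, %ld($%u)\n", rt, offset, base);
}
#endif
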
417 /*
418  * The ll_bit is cleared by r*_switch.S
419  */
420 
421 unsigned long ll_bit;
422 
423 static struct task_struct *ll_task = NULL;
424 
425 static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
426 {
427 	unsigned long value, __user *vaddr;
428 	long offset;
429 	int signal = 0;
430 
431 	/*
432 	 * analyse the ll instruction that just caused an RI exception
433 	 * and compute the referenced address in vaddr.
434 	 */
435 
436 	/* sign extend offset */
437 	offset = opcode & OFFSET;
438 	offset <<= 16;
439 	offset >>= 16;
440 
441 	vaddr = (unsigned long __user *)
442 	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
443 
444 	if ((unsigned long)vaddr & 3) {
445 		signal = SIGBUS;
446 		goto sig;
447 	}
448 	if (get_user(value, vaddr)) {
449 		signal = SIGSEGV;
450 		goto sig;
451 	}
452 
453 	preempt_disable();
454 
455 	if (ll_task == NULL || ll_task == current) {
456 		ll_bit = 1;
457 	} else {
458 		ll_bit = 0;
459 	}
460 	ll_task = current;
461 
462 	preempt_enable();
463 
464 	compute_return_epc(regs);
465 
466 	regs->regs[(opcode & RT) >> 16] = value;
467 
468 	return;
469 
470 sig:
471 	force_sig(signal, current);
472 }
473 
474 static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
475 {
476 	unsigned long __user *vaddr;
477 	unsigned long reg;
478 	long offset;
479 	int signal = 0;
480 
481 	/*
482 	 * analyse the sc instruction that just caused an RI exception
483 	 * and compute the referenced address in vaddr.
484 	 */
485 
486 	/* sign extend offset */
487 	offset = opcode & OFFSET;
488 	offset <<= 16;
489 	offset >>= 16;
490 
491 	vaddr = (unsigned long __user *)
492 	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
493 	reg = (opcode & RT) >> 16;
494 
495 	if ((unsigned long)vaddr & 3) {
496 		signal = SIGBUS;
497 		goto sig;
498 	}
499 
500 	preempt_disable();
501 
502 	if (ll_bit == 0 || ll_task != current) {
503 		compute_return_epc(regs);
504 		regs->regs[reg] = 0;
505 		preempt_enable();
506 		return;
507 	}
508 
509 	preempt_enable();
510 
511 	if (put_user(regs->regs[reg], vaddr)) {
512 		signal = SIGSEGV;
513 		goto sig;
514 	}
515 
516 	compute_return_epc(regs);
517 	regs->regs[reg] = 1;
518 
519 	return;
520 
521 sig:
522 	force_sig(signal, current);
523 }
524 
525 /*
526  * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
527  * opcodes are supposed to result in coprocessor unusable exceptions if
528  * executed on ll/sc-less processors.  That's the theory.  In practice a
529  * few processors such as NEC's VR4100 throw reserved instruction exceptions
530  * instead, so we're doing the emulation thing in both exception handlers.
531  */
532 static inline int simulate_llsc(struct pt_regs *regs)
533 {
534 	unsigned int opcode;
535 
536 	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
537 		goto out_sigsegv;
538 
539 	if ((opcode & OPCODE) == LL) {
540 		simulate_ll(regs, opcode);
541 		return 0;
542 	}
543 	if ((opcode & OPCODE) == SC) {
544 		simulate_sc(regs, opcode);
545 		return 0;
546 	}
547 
548 	return -EFAULT;			/* Strange things going on ... */
549 
550 out_sigsegv:
551 	force_sig(SIGSEGV, current);
552 	return -EFAULT;
553 }
554 
555 /*
556  * Simulate trapping 'rdhwr' instructions to provide user accessible
557  * registers not implemented in hardware.  The only current use of this
558  * is the thread area pointer.
559  */
560 static inline int simulate_rdhwr(struct pt_regs *regs)
561 {
562 	struct thread_info *ti = task_thread_info(current);
563 	unsigned int opcode;
564 
565 	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
566 		goto out_sigsegv;
567 
568 	if (unlikely(compute_return_epc(regs)))
569 		return -EFAULT;
570 
571 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
572 		int rd = (opcode & RD) >> 11;
573 		int rt = (opcode & RT) >> 16;
574 		switch (rd) {
575 			case 29:
576 				regs->regs[rt] = ti->tp_value;
577 				return 0;
578 			default:
579 				return -EFAULT;
580 		}
581 	}
582 
583 	/* Not ours.  */
584 	return -EFAULT;
585 
586 out_sigsegv:
587 	force_sig(SIGSEGV, current);
588 	return -EFAULT;
589 }
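
/*
 * Worked example (illustrative): the TLS access instruction emitted by
 * userland, "rdhwr $3, $29", encodes as 0x7c03e83b.  On cores without a
 * hardware rdhwr it raises a reserved instruction exception and ends up in
 * simulate_rdhwr() above, which sees rd == 29 and hands the thread pointer
 * back in $3 (rt == 3).
 */
#if 0
static void example_decode_rdhwr(void)
{
	unsigned int opcode = 0x7c03e83b;	/* rdhwr $3, $29 */
	int rd = (opcode & RD) >> 11;		/* 29: thread pointer register */
	int rt = (opcode & RT) >> 16;		/* 3: destination is $v1 */

	printk(KERN_DEBUG "rdhwr rd=%d rt=%d\n", rd, rt);
}
#endif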
590 
591 asmlinkage void do_ov(struct pt_regs *regs)
592 {
593 	siginfo_t info;
594 
595 	die_if_kernel("Integer overflow", regs);
596 
597 	info.si_code = FPE_INTOVF;
598 	info.si_signo = SIGFPE;
599 	info.si_errno = 0;
600 	info.si_addr = (void __user *) regs->cp0_epc;
601 	force_sig_info(SIGFPE, &info, current);
602 }
603 
604 /*
605  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
606  */
607 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
608 {
609 	siginfo_t info;
610 
611 	die_if_kernel("FP exception in kernel code", regs);
612 
613 	if (fcr31 & FPU_CSR_UNI_X) {
614 		int sig;
615 
616 		/*
617 		 * Unimplemented operation exception.  If we've got the full
618 		 * software emulator on-board, let's use it...
619 		 *
620 		 * Force FPU to dump state into task/thread context.  We're
621 		 * moving a lot of data here for what is probably a single
622 		 * instruction, but the alternative is to pre-decode the FP
623 		 * register operands before invoking the emulator, which seems
624 		 * a bit extreme for what should be an infrequent event.
625 		 */
626 		/* Ensure 'resume' does not overwrite the saved FP context again. */
627 		lose_fpu(1);
628 
629 		/* Run the emulator */
630 		sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
631 
632 		/*
633 		 * We can't allow the emulated instruction to leave any of
634 		 * the cause bits set in $fcr31.
635 		 */
636 		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
637 
638 		/* Restore the hardware register state */
639 		own_fpu(1);	/* Using the FPU again.  */
640 
641 		/* If something went wrong, signal */
642 		if (sig)
643 			force_sig(sig, current);
644 
645 		return;
646 	} else if (fcr31 & FPU_CSR_INV_X)
647 		info.si_code = FPE_FLTINV;
648 	else if (fcr31 & FPU_CSR_DIV_X)
649 		info.si_code = FPE_FLTDIV;
650 	else if (fcr31 & FPU_CSR_OVF_X)
651 		info.si_code = FPE_FLTOVF;
652 	else if (fcr31 & FPU_CSR_UDF_X)
653 		info.si_code = FPE_FLTUND;
654 	else if (fcr31 & FPU_CSR_INE_X)
655 		info.si_code = FPE_FLTRES;
656 	else
657 		info.si_code = __SI_FAULT;
658 	info.si_signo = SIGFPE;
659 	info.si_errno = 0;
660 	info.si_addr = (void __user *) regs->cp0_epc;
661 	force_sig_info(SIGFPE, &info, current);
662 }
663 
664 asmlinkage void do_bp(struct pt_regs *regs)
665 {
666 	unsigned int opcode, bcode;
667 	siginfo_t info;
668 
669 	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
670 		goto out_sigsegv;
671 
672 	/*
673 	 * There is an ancient bug in MIPS assemblers: the break code is
674 	 * encoded starting at bit 16 instead of at bit 6 of the opcode.
675 	 * Gas is bug-compatible, but not always, grrr...  We handle both
676 	 * cases with a simple heuristic, normalising to the bit-16 form.  --macro
677 	 */
678 	bcode = ((opcode >> 6) & ((1 << 20) - 1));
679 	if (bcode < (1 << 10))
680 		bcode <<= 10;
681 
682 	/*
683 	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
684 	 * insns, even for break codes that indicate arithmetic failures.
685 	 * Weird ...)
686 	 * But should we continue the brokenness???  --macro
687 	 */
688 	switch (bcode) {
689 	case BRK_OVERFLOW << 10:
690 	case BRK_DIVZERO << 10:
691 		die_if_kernel("Break instruction in kernel code", regs);
692 		if (bcode == (BRK_DIVZERO << 10))
693 			info.si_code = FPE_INTDIV;
694 		else
695 			info.si_code = FPE_INTOVF;
696 		info.si_signo = SIGFPE;
697 		info.si_errno = 0;
698 		info.si_addr = (void __user *) regs->cp0_epc;
699 		force_sig_info(SIGFPE, &info, current);
700 		break;
701 	case BRK_BUG:
702 		die("Kernel bug detected", regs);
703 		break;
704 	default:
705 		die_if_kernel("Break instruction in kernel code", regs);
706 		force_sig(SIGTRAP, current);
707 	}
708 	return;
709 
710 out_sigsegv:
711 	force_sig(SIGSEGV, current);
712 }
713 
714 asmlinkage void do_tr(struct pt_regs *regs)
715 {
716 	unsigned int opcode, tcode = 0;
717 	siginfo_t info;
718 
719 	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
720 		goto out_sigsegv;
721 
722 	/* Immediate versions don't provide a code.  */
723 	if (!(opcode & OPCODE))
724 		tcode = ((opcode >> 6) & ((1 << 10) - 1));
725 
726 	/*
727 	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
728 	 * insns, even for trap codes that indicate arithmetic failures.
729 	 * Weird ...)
730 	 * But should we continue the brokenness???  --macro
731 	 */
732 	switch (tcode) {
733 	case BRK_OVERFLOW:
734 	case BRK_DIVZERO:
735 		die_if_kernel("Trap instruction in kernel code", regs);
736 		if (tcode == BRK_DIVZERO)
737 			info.si_code = FPE_INTDIV;
738 		else
739 			info.si_code = FPE_INTOVF;
740 		info.si_signo = SIGFPE;
741 		info.si_errno = 0;
742 		info.si_addr = (void __user *) regs->cp0_epc;
743 		force_sig_info(SIGFPE, &info, current);
744 		break;
745 	case BRK_BUG:
746 		die("Kernel bug detected", regs);
747 		break;
748 	default:
749 		die_if_kernel("Trap instruction in kernel code", regs);
750 		force_sig(SIGTRAP, current);
751 	}
752 	return;
753 
754 out_sigsegv:
755 	force_sig(SIGSEGV, current);
756 }
757 
758 asmlinkage void do_ri(struct pt_regs *regs)
759 {
760 	die_if_kernel("Reserved instruction in kernel code", regs);
761 
762 	if (!cpu_has_llsc)
763 		if (!simulate_llsc(regs))
764 			return;
765 
766 	if (!simulate_rdhwr(regs))
767 		return;
768 
769 	force_sig(SIGILL, current);
770 }
771 
772 /*
773  * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
774  * emulated more than some threshold number of instructions, force migration to
775  * a "CPU" that has FP support.
776  */
777 static void mt_ase_fp_affinity(void)
778 {
779 #ifdef CONFIG_MIPS_MT_FPAFF
780 	if (mt_fpemul_threshold > 0 &&
781 	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
782 		/*
783 		 * If there's no FPU present, or if the application has already
784 		 * restricted the allowed set to exclude any CPUs with FPUs,
785 		 * we'll skip the procedure.
786 		 */
787 		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
788 			cpumask_t tmask;
789 
790 			cpus_and(tmask, current->thread.user_cpus_allowed,
791 			         mt_fpu_cpumask);
792 			set_cpus_allowed(current, tmask);
793 			set_thread_flag(TIF_FPUBOUND);
794 		}
795 	}
796 #endif /* CONFIG_MIPS_MT_FPAFF */
797 }
798 
799 asmlinkage void do_cpu(struct pt_regs *regs)
800 {
801 	unsigned int cpid;
802 
803 	die_if_kernel("do_cpu invoked from kernel context!", regs);
804 
805 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
806 
807 	switch (cpid) {
808 	case 0:
809 		if (!cpu_has_llsc)
810 			if (!simulate_llsc(regs))
811 				return;
812 
813 		if (!simulate_rdhwr(regs))
814 			return;
815 
816 		break;
817 
818 	case 1:
819 		if (used_math())	/* Using the FPU again.  */
820 			own_fpu(1);
821 		else {			/* First time FPU user.  */
822 			init_fpu();
823 			set_used_math();
824 		}
825 
826 		if (!raw_cpu_has_fpu) {
827 			int sig;
828 			sig = fpu_emulator_cop1Handler(regs,
829 						&current->thread.fpu, 0);
830 			if (sig)
831 				force_sig(sig, current);
832 			else
833 				mt_ase_fp_affinity();
834 		}
835 
836 		return;
837 
838 	case 2:
839 	case 3:
840 		break;
841 	}
842 
843 	force_sig(SIGILL, current);
844 }
845 
846 asmlinkage void do_mdmx(struct pt_regs *regs)
847 {
848 	force_sig(SIGILL, current);
849 }
850 
851 asmlinkage void do_watch(struct pt_regs *regs)
852 {
853 	if (board_watchpoint_handler) {
854 		(*board_watchpoint_handler)(regs);
855 		return;
856 	}
857 
858 	/*
859 	 * We use the watch exception where available to detect stack
860 	 * overflows.
861 	 */
862 	dump_tlb_all();
863 	show_regs(regs);
864 	panic("Caught WATCH exception - probably caused by stack overflow.");
865 }
866 
867 asmlinkage void do_mcheck(struct pt_regs *regs)
868 {
869 	const int field = 2 * sizeof(unsigned long);
870 	int multi_match = regs->cp0_status & ST0_TS;
871 
872 	show_regs(regs);
873 
874 	if (multi_match) {
875 		printk("Index   : %0x\n", read_c0_index());
876 		printk("Pagemask: %0x\n", read_c0_pagemask());
877 		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
878 		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
879 		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
880 		printk("\n");
881 		dump_tlb_all();
882 	}
883 
884 	show_code((unsigned int __user *) regs->cp0_epc);
885 
886 	/*
887 	 * Some chips may have other causes of machine check (e.g. SB1
888 	 * graduation timer)
889 	 */
890 	panic("Caught Machine Check exception - %scaused by multiple "
891 	      "matching entries in the TLB.",
892 	      (multi_match) ? "" : "not ");
893 }
894 
895 asmlinkage void do_mt(struct pt_regs *regs)
896 {
897 	int subcode;
898 
899 	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
900 			>> VPECONTROL_EXCPT_SHIFT;
901 	switch (subcode) {
902 	case 0:
903 		printk(KERN_DEBUG "Thread Underflow\n");
904 		break;
905 	case 1:
906 		printk(KERN_DEBUG "Thread Overflow\n");
907 		break;
908 	case 2:
909 		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
910 		break;
911 	case 3:
912 		printk(KERN_DEBUG "Gating Storage Exception\n");
913 		break;
914 	case 4:
915 		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
916 		break;
917 	case 5:
918 		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
919 		break;
920 	default:
921 		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
922 			subcode);
923 		break;
924 	}
925 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
926 
927 	force_sig(SIGILL, current);
928 }
929 
930 
931 asmlinkage void do_dsp(struct pt_regs *regs)
932 {
933 	if (cpu_has_dsp)
934 		panic("Unexpected DSP exception\n");
935 
936 	force_sig(SIGILL, current);
937 }
938 
939 asmlinkage void do_reserved(struct pt_regs *regs)
940 {
941 	/*
942 	 * Game over - no way to handle this if it ever occurs.  Most probably
943 	 * caused by a new unknown CPU type or by another deadly
944 	 * hardware/software error.
945 	 */
946 	show_regs(regs);
947 	panic("Caught reserved exception %ld - should not happen.",
948 	      (regs->cp0_cause & 0x7f) >> 2);
949 }
950 
951 /*
952  * Some MIPS CPUs can enable/disable cache parity detection, but they
953  * do it in different ways.
954  */
955 static inline void parity_protection_init(void)
956 {
957 	switch (current_cpu_type()) {
958 	case CPU_24K:
959 	case CPU_34K:
960 	case CPU_5KC:
961 		write_c0_ecc(0x80000000);
962 		back_to_back_c0_hazard();
963 		/* Set the PE bit (bit 31) in the c0_errctl register. */
964 		printk(KERN_INFO "Cache parity protection %sabled\n",
965 		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
966 		break;
967 	case CPU_20KC:
968 	case CPU_25KF:
969 		/* Clear the DE bit (bit 16) in the c0_status register. */
970 		printk(KERN_INFO "Enable cache parity protection for "
971 		       "MIPS 20KC/25KF CPUs.\n");
972 		clear_c0_status(ST0_DE);
973 		break;
974 	default:
975 		break;
976 	}
977 }
978 
979 asmlinkage void cache_parity_error(void)
980 {
981 	const int field = 2 * sizeof(unsigned long);
982 	unsigned int reg_val;
983 
984 	/* For the moment, report the problem and hang. */
985 	printk("Cache error exception:\n");
986 	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
987 	reg_val = read_c0_cacheerr();
988 	printk("c0_cacheerr == %08x\n", reg_val);
989 
990 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
991 	       reg_val & (1<<30) ? "secondary" : "primary",
992 	       reg_val & (1<<31) ? "data" : "insn");
993 	printk("Error bits: %s%s%s%s%s%s%s\n",
994 	       reg_val & (1<<29) ? "ED " : "",
995 	       reg_val & (1<<28) ? "ET " : "",
996 	       reg_val & (1<<26) ? "EE " : "",
997 	       reg_val & (1<<25) ? "EB " : "",
998 	       reg_val & (1<<24) ? "EI " : "",
999 	       reg_val & (1<<23) ? "E1 " : "",
1000 	       reg_val & (1<<22) ? "E0 " : "");
1001 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1002 
1003 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1004 	if (reg_val & (1<<22))
1005 		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1006 
1007 	if (reg_val & (1<<23))
1008 		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1009 #endif
1010 
1011 	panic("Can't handle the cache error!");
1012 }
1013 
1014 /*
1015  * SDBBP EJTAG debug exception handler.
1016  * We skip the instruction and return to the next instruction.
1017  */
1018 void ejtag_exception_handler(struct pt_regs *regs)
1019 {
1020 	const int field = 2 * sizeof(unsigned long);
1021 	unsigned long depc, old_epc;
1022 	unsigned int debug;
1023 
1024 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1025 	depc = read_c0_depc();
1026 	debug = read_c0_debug();
1027 	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1028 	if (debug & 0x80000000) {
1029 		/*
1030 		 * In branch delay slot.
1031 		 * We cheat a little bit here and use EPC to calculate the
1032 		 * debug return address (DEPC). EPC is restored after the
1033 		 * calculation.
1034 		 */
1035 		old_epc = regs->cp0_epc;
1036 		regs->cp0_epc = depc;
1037 		__compute_return_epc(regs);
1038 		depc = regs->cp0_epc;
1039 		regs->cp0_epc = old_epc;
1040 	} else
1041 		depc += 4;
1042 	write_c0_depc(depc);
1043 
1044 #if 0
1045 	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1046 	write_c0_debug(debug | 0x100);
1047 #endif
1048 }
1049 
1050 /*
1051  * NMI exception handler.
1052  */
1053 NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
1054 {
1055 	bust_spinlocks(1);
1056 	printk("NMI taken!!!!\n");
1057 	die("NMI", regs);
1058 }
1059 
1060 #define VECTORSPACING 0x100	/* for EI/VI mode */
1061 
1062 unsigned long ebase;
1063 unsigned long exception_handlers[32];
1064 unsigned long vi_handlers[64];
1065 
1066 /*
1067  * As a side effect of the way this is implemented we're limited
1068  * to interrupt handlers in the address range from
1069  * KSEG0 <= x < KSEG0 + 256MB on the Nevada.  Oh well ...
1070  */
1071 void *set_except_vector(int n, void *addr)
1072 {
1073 	unsigned long handler = (unsigned long) addr;
1074 	unsigned long old_handler = exception_handlers[n];
1075 
1076 	exception_handlers[n] = handler;
1077 	if (n == 0 && cpu_has_divec) {
1078 		*(u32 *)(ebase + 0x200) = 0x08000000 |
1079 					  (0x03ffffff & (handler >> 2));
1080 		flush_icache_range(ebase + 0x200, ebase + 0x204);
1081 	}
1082 	return (void *)old_handler;
1083 }
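
/*
 * Encoding note (illustrative): the word written at ebase + 0x200 above is a
 * MIPS "j" instruction.  A jump only supplies the low 28 bits of the target
 * (a 26-bit instruction index << 2) and inherits the upper bits from the
 * vector's own address, which is where the "same 256MB segment" restriction
 * mentioned above comes from.
 */
#if 0
static u32 example_build_jump(unsigned long handler)
{
	/* j handler: opcode 0x08000000 plus bits 27..2 of the target */
	return 0x08000000 | (u32)(0x03ffffff & (handler >> 2));
}
#endif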
1084 
1085 #ifdef CONFIG_CPU_MIPSR2_SRS
1086 /*
1087  * MIPSR2 shadow register set allocation
1088  * FIXME: SMP...
1089  */
1090 
1091 static struct shadow_registers {
1092 	/*
1093 	 * Number of shadow register sets supported
1094 	 */
1095 	unsigned long sr_supported;
1096 	/*
1097 	 * Bitmap of allocated shadow registers
1098 	 */
1099 	unsigned long sr_allocated;
1100 } shadow_registers;
1101 
1102 static void mips_srs_init(void)
1103 {
1104 	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1105 	printk(KERN_INFO "%ld MIPSR2 register sets available\n",
1106 	       shadow_registers.sr_supported);
1107 	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
1108 }
1109 
1110 int mips_srs_max(void)
1111 {
1112 	return shadow_registers.sr_supported;
1113 }
1114 
1115 int mips_srs_alloc(void)
1116 {
1117 	struct shadow_registers *sr = &shadow_registers;
1118 	int set;
1119 
1120 again:
1121 	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
1122 	if (set >= sr->sr_supported)
1123 		return -1;
1124 
1125 	if (test_and_set_bit(set, &sr->sr_allocated))
1126 		goto again;
1127 
1128 	return set;
1129 }
1130 
1131 void mips_srs_free(int set)
1132 {
1133 	struct shadow_registers *sr = &shadow_registers;
1134 
1135 	clear_bit(set, &sr->sr_allocated);
1136 }
1137 
1138 static asmlinkage void do_default_vi(void)
1139 {
1140 	show_regs(get_irq_regs());
1141 	panic("Caught unexpected vectored interrupt.");
1142 }
1143 
1144 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1145 {
1146 	unsigned long handler;
1147 	unsigned long old_handler = vi_handlers[n];
1148 	u32 *w;
1149 	unsigned char *b;
1150 
1151 	if (!cpu_has_veic && !cpu_has_vint)
1152 		BUG();
1153 
1154 	if (addr == NULL) {
1155 		handler = (unsigned long) do_default_vi;
1156 		srs = 0;
1157 	} else
1158 		handler = (unsigned long) addr;
1159 	vi_handlers[n] = (unsigned long) addr;
1160 
1161 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1162 
1163 	if (srs >= mips_srs_max())
1164 		panic("Shadow register set %d not supported", srs);
1165 
1166 	if (cpu_has_veic) {
1167 		if (board_bind_eic_interrupt)
1168 			board_bind_eic_interrupt (n, srs);
1169 	} else if (cpu_has_vint) {
1170 		/* SRSMap is only defined if shadow sets are implemented */
1171 		if (mips_srs_max() > 1)
1172 			change_c0_srsmap (0xf << n*4, srs << n*4);
1173 	}
1174 
1175 	if (srs == 0) {
1176 		/*
1177 		 * If no shadow set is selected then use the default handler
1178 		 * that does normal register saving and a standard interrupt exit
1179 		 */
1180 
1181 		extern char except_vec_vi, except_vec_vi_lui;
1182 		extern char except_vec_vi_ori, except_vec_vi_end;
1183 #ifdef CONFIG_MIPS_MT_SMTC
1184 		/*
1185 		 * We need to provide the SMTC vectored interrupt handler
1186 		 * not only with the address of the handler, but with the
1187 		 * Status.IM bit to be masked before going there.
1188 		 */
1189 		extern char except_vec_vi_mori;
1190 		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
1191 #endif /* CONFIG_MIPS_MT_SMTC */
1192 		const int handler_len = &except_vec_vi_end - &except_vec_vi;
1193 		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
1194 		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
1195 
1196 		if (handler_len > VECTORSPACING) {
1197 			/*
1198 			 * Sigh... panicking won't help as the console
1199 			 * is probably not configured :(
1200 			 */
1201 			panic ("VECTORSPACING too small");
1202 		}
1203 
1204 		memcpy (b, &except_vec_vi, handler_len);
1205 #ifdef CONFIG_MIPS_MT_SMTC
1206 		BUG_ON(n > 7);	/* Vector index exceeds the SMTC maximum. */
1207 
1208 		w = (u32 *)(b + mori_offset);
1209 		*w = (*w & 0xffff0000) | (0x100 << n);
1210 #endif /* CONFIG_MIPS_MT_SMTC */
1211 		w = (u32 *)(b + lui_offset);
1212 		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1213 		w = (u32 *)(b + ori_offset);
1214 		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
1215 		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
1216 	}
1217 	else {
1218 		/*
1219 		 * In other cases jump directly to the interrupt handler
1220 		 *
1221 		 * It is the handler's responsibility to save registers if required
1222 		 * (e.g. hi/lo) and to return from the exception using "eret".
1223 		 */
1224 		w = (u32 *)b;
1225 		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
1226 		*w = 0;
1227 		flush_icache_range((unsigned long)b, (unsigned long)(b+8));
1228 	}
1229 
1230 	return (void *)old_handler;
1231 }
1232 
1233 void *set_vi_handler(int n, vi_handler_t addr)
1234 {
1235 	return set_vi_srs_handler(n, addr, 0);
1236 }
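
/*
 * Usage sketch (illustrative, hypothetical caller): reserve one of the
 * MIPSR2 shadow register sets and point a vectored interrupt at a handler
 * running on it.  Set 0 stays with the kernel, and set_vi_handler() above is
 * simply the "vector n, shadow set 0" shorthand.
 */
#if 0
static void example_setup_fast_irq(int vec, vi_handler_t fast_handler)
{
	int srs = mips_srs_alloc();

	if (srs < 0)
		srs = 0;	/* no free shadow set: fall back to set 0 */

	set_vi_srs_handler(vec, fast_handler, srs);
}
#endif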
1237 
1238 #else
1239 
1240 static inline void mips_srs_init(void)
1241 {
1242 }
1243 
1244 #endif /* CONFIG_CPU_MIPSR2_SRS */
1245 
1246 /*
1247  * This is used by native signal handling
1248  */
1249 asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
1250 asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
1251 
1252 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
1253 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
1254 
1255 extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
1256 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
1257 
1258 #ifdef CONFIG_SMP
1259 static int smp_save_fp_context(struct sigcontext __user *sc)
1260 {
1261 	return raw_cpu_has_fpu
1262 	       ? _save_fp_context(sc)
1263 	       : fpu_emulator_save_context(sc);
1264 }
1265 
1266 static int smp_restore_fp_context(struct sigcontext __user *sc)
1267 {
1268 	return raw_cpu_has_fpu
1269 	       ? _restore_fp_context(sc)
1270 	       : fpu_emulator_restore_context(sc);
1271 }
1272 #endif
1273 
1274 static inline void signal_init(void)
1275 {
1276 #ifdef CONFIG_SMP
1277 	/* For now just do the cpu_has_fpu check when the functions are invoked */
1278 	save_fp_context = smp_save_fp_context;
1279 	restore_fp_context = smp_restore_fp_context;
1280 #else
1281 	if (cpu_has_fpu) {
1282 		save_fp_context = _save_fp_context;
1283 		restore_fp_context = _restore_fp_context;
1284 	} else {
1285 		save_fp_context = fpu_emulator_save_context;
1286 		restore_fp_context = fpu_emulator_restore_context;
1287 	}
1288 #endif
1289 }
1290 
1291 #ifdef CONFIG_MIPS32_COMPAT
1292 
1293 /*
1294  * This is used by 32-bit signal stuff on the 64-bit kernel
1295  */
1296 asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
1297 asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
1298 
1299 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
1300 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
1301 
1302 extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
1303 extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
1304 
1305 static inline void signal32_init(void)
1306 {
1307 	if (cpu_has_fpu) {
1308 		save_fp_context32 = _save_fp_context32;
1309 		restore_fp_context32 = _restore_fp_context32;
1310 	} else {
1311 		save_fp_context32 = fpu_emulator_save_context32;
1312 		restore_fp_context32 = fpu_emulator_restore_context32;
1313 	}
1314 }
1315 #endif
1316 
1317 extern void cpu_cache_init(void);
1318 extern void tlb_init(void);
1319 extern void flush_tlb_handlers(void);
1320 
1321 void __init per_cpu_trap_init(void)
1322 {
1323 	unsigned int cpu = smp_processor_id();
1324 	unsigned int status_set = ST0_CU0;
1325 #ifdef CONFIG_MIPS_MT_SMTC
1326 	int secondaryTC = 0;
1327 	int bootTC = (cpu == 0);
1328 
1329 	/*
1330 	 * Only do per_cpu_trap_init() for the first TC of each VPE.
1331 	 * Note that this hack assumes that the SMTC init code
1332 	 * assigns TCs consecutively and in ascending order.
1333 	 */
1334 
1335 	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1336 	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1337 		secondaryTC = 1;
1338 #endif /* CONFIG_MIPS_MT_SMTC */
1339 
1340 	/*
1341 	 * Disable coprocessors and select 32-bit or 64-bit addressing
1342 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
1343 	 * flag that some firmware may have left set and the TS bit (for
1344 	 * IP27).  Set XX for ISA IV code to work.
1345 	 */
1346 #ifdef CONFIG_64BIT
1347 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1348 #endif
1349 	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
1350 		status_set |= ST0_XX;
1351 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1352 			 status_set);
1353 
1354 	if (cpu_has_dsp)
1355 		set_c0_status(ST0_MX);
1356 
1357 #ifdef CONFIG_CPU_MIPSR2
1358 	if (cpu_has_mips_r2) {
1359 		unsigned int enable = 0x0000000f;
1360 
1361 		if (cpu_has_userlocal)
1362 			enable |= (1 << 29);
1363 
1364 		write_c0_hwrena(enable);
1365 	}
1366 #endif
1367 
1368 #ifdef CONFIG_MIPS_MT_SMTC
1369 	if (!secondaryTC) {
1370 #endif /* CONFIG_MIPS_MT_SMTC */
1371 
1372 	if (cpu_has_veic || cpu_has_vint) {
1373 		write_c0_ebase (ebase);
1374 		/* Setting vector spacing enables EI/VI mode  */
1375 		change_c0_intctl (0x3e0, VECTORSPACING);
1376 	}
1377 	if (cpu_has_divec) {
1378 		if (cpu_has_mipsmt) {
1379 			unsigned int vpflags = dvpe();
1380 			set_c0_cause(CAUSEF_IV);
1381 			evpe(vpflags);
1382 		} else
1383 			set_c0_cause(CAUSEF_IV);
1384 	}
1385 
1386 	/*
1387 	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
1388 	 *
1389 	 *  o read IntCtl.IPTI to determine the timer interrupt
1390 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
1391 	 */
1392 	if (cpu_has_mips_r2) {
1393 		cp0_compare_irq = (read_c0_intctl () >> 29) & 7;
1394 		cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7;
1395 		if (cp0_perfcount_irq == cp0_compare_irq)
1396 			cp0_perfcount_irq = -1;
1397 	} else {
1398 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
1399 		cp0_perfcount_irq = -1;
1400 	}
1401 
1402 #ifdef CONFIG_MIPS_MT_SMTC
1403 	}
1404 #endif /* CONFIG_MIPS_MT_SMTC */
1405 
1406 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1407 	TLBMISS_HANDLER_SETUP();
1408 
1409 	atomic_inc(&init_mm.mm_count);
1410 	current->active_mm = &init_mm;
1411 	BUG_ON(current->mm);
1412 	enter_lazy_tlb(&init_mm, current);
1413 
1414 #ifdef CONFIG_MIPS_MT_SMTC
1415 	if (bootTC) {
1416 #endif /* CONFIG_MIPS_MT_SMTC */
1417 		cpu_cache_init();
1418 		tlb_init();
1419 #ifdef CONFIG_MIPS_MT_SMTC
1420 	} else if (!secondaryTC) {
1421 		/*
1422 		 * The first TC in a non-boot VPE must do a subset of tlb_init()
1423 		 * for the MMU control registers.
1424 		 */
1425 		write_c0_pagemask(PM_DEFAULT_MASK);
1426 		write_c0_wired(0);
1427 	}
1428 #endif /* CONFIG_MIPS_MT_SMTC */
1429 }
1430 
1431 /* Install CPU exception handler */
1432 void __init set_handler (unsigned long offset, void *addr, unsigned long size)
1433 {
1434 	memcpy((void *)(ebase + offset), addr, size);
1435 	flush_icache_range(ebase + offset, ebase + offset + size);
1436 }
1437 
1438 static char panic_null_cerr[] __initdata =
1439 	"Trying to set NULL cache error exception handler";
1440 
1441 /* Install uncached CPU exception handler */
1442 void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
1443 {
1444 #ifdef CONFIG_32BIT
1445 	unsigned long uncached_ebase = KSEG1ADDR(ebase);
1446 #endif
1447 #ifdef CONFIG_64BIT
1448 	unsigned long uncached_ebase = TO_UNCAC(ebase);
1449 #endif
1450 
1451 	if (!addr)
1452 		panic(panic_null_cerr);
1453 
1454 	memcpy((void *)(uncached_ebase + offset), addr, size);
1455 }
1456 
1457 static int __initdata rdhwr_noopt;
1458 static int __init set_rdhwr_noopt(char *str)
1459 {
1460 	rdhwr_noopt = 1;
1461 	return 1;
1462 }
1463 
1464 __setup("rdhwr_noopt", set_rdhwr_noopt);
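
/*
 * Usage note (illustrative): booting with "rdhwr_noopt" on the kernel
 * command line makes trap_init() below install the generic handle_ri vector
 * instead of the optimised handle_ri_rdhwr fast path, which is handy when
 * that fast path itself is under suspicion.
 */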
1465 
1466 void __init trap_init(void)
1467 {
1468 	extern char except_vec3_generic, except_vec3_r4000;
1469 	extern char except_vec4;
1470 	unsigned long i;
1471 
1472 	if (cpu_has_veic || cpu_has_vint)
1473 		ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
1474 	else
1475 		ebase = CAC_BASE;
1476 
1477 	mips_srs_init();
1478 
1479 	per_cpu_trap_init();
1480 
1481 	/*
1482 	 * Copy the generic exception handlers to their final destination.
1483 	 * This will be overridden later as suitable for a particular
1484 	 * configuration.
1485 	 */
1486 	set_handler(0x180, &except_vec3_generic, 0x80);
1487 
1488 	/*
1489 	 * Setup default vectors
1490 	 */
1491 	for (i = 0; i <= 31; i++)
1492 		set_except_vector(i, handle_reserved);
1493 
1494 	/*
1495 	 * Copy the EJTAG debug exception vector handler code to its final
1496 	 * destination.
1497 	 */
1498 	if (cpu_has_ejtag && board_ejtag_handler_setup)
1499 		board_ejtag_handler_setup ();
1500 
1501 	/*
1502 	 * Only some CPUs have the watch exceptions.
1503 	 */
1504 	if (cpu_has_watch)
1505 		set_except_vector(23, handle_watch);
1506 
1507 	/*
1508 	 * Initialise interrupt handlers
1509 	 */
1510 	if (cpu_has_veic || cpu_has_vint) {
1511 		int nvec = cpu_has_veic ? 64 : 8;
1512 		for (i = 0; i < nvec; i++)
1513 			set_vi_handler(i, NULL);
1514 	}
1515 	else if (cpu_has_divec)
1516 		set_handler(0x200, &except_vec4, 0x8);
1517 
1518 	/*
1519 	 * Some CPUs can enable/disable cache parity detection, but they do
1520 	 * it in different ways.
1521 	 */
1522 	parity_protection_init();
1523 
1524 	/*
1525 	 * The Data Bus Errors / Instruction Bus Errors are signaled
1526 	 * by external hardware.  Therefore these two exceptions
1527 	 * may have board-specific handlers.
1528 	 */
1529 	if (board_be_init)
1530 		board_be_init();
1531 
1532 	set_except_vector(0, handle_int);
1533 	set_except_vector(1, handle_tlbm);
1534 	set_except_vector(2, handle_tlbl);
1535 	set_except_vector(3, handle_tlbs);
1536 
1537 	set_except_vector(4, handle_adel);
1538 	set_except_vector(5, handle_ades);
1539 
1540 	set_except_vector(6, handle_ibe);
1541 	set_except_vector(7, handle_dbe);
1542 
1543 	set_except_vector(8, handle_sys);
1544 	set_except_vector(9, handle_bp);
1545 	set_except_vector(10, rdhwr_noopt ? handle_ri :
1546 			  (cpu_has_vtag_icache ?
1547 			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1548 	set_except_vector(11, handle_cpu);
1549 	set_except_vector(12, handle_ov);
1550 	set_except_vector(13, handle_tr);
1551 
1552 	if (current_cpu_type() == CPU_R6000 ||
1553 	    current_cpu_type() == CPU_R6000A) {
1554 		/*
1555 		 * The R6000 is the only R-series CPU that features a machine
1556 		 * check exception (similar to the R4000 cache error) and
1557 		 * unaligned ldc1/sdc1 exception.  The handlers have not been
1558 		 * written yet.  Well, anyway there is no R6000 machine on the
1559 		 * current list of targets for Linux/MIPS.
1560 		 * (Duh, crap, there is someone with a triple R6k machine)
1561 		 */
1562 		//set_except_vector(14, handle_mc);
1563 		//set_except_vector(15, handle_ndc);
1564 	}
1565 
1566 
1567 	if (board_nmi_handler_setup)
1568 		board_nmi_handler_setup();
1569 
1570 	if (cpu_has_fpu && !cpu_has_nofpuex)
1571 		set_except_vector(15, handle_fpe);
1572 
1573 	set_except_vector(22, handle_mdmx);
1574 
1575 	if (cpu_has_mcheck)
1576 		set_except_vector(24, handle_mcheck);
1577 
1578 	if (cpu_has_mipsmt)
1579 		set_except_vector(25, handle_mt);
1580 
1581 	set_except_vector(26, handle_dsp);
1582 
1583 	if (cpu_has_vce)
1584 		/* Special exception: R4[04]00 also uses the divec space. */
1585 		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
1586 	else if (cpu_has_4kex)
1587 		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
1588 	else
1589 		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
1590 
1591 	signal_init();
1592 #ifdef CONFIG_MIPS32_COMPAT
1593 	signal32_init();
1594 #endif
1595 
1596 	flush_icache_range(ebase, ebase + 0x400);
1597 	flush_tlb_handlers();
1598 }
1599