xref: /linux/arch/mips/kernel/traps.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7  * Copyright (C) 1995, 1996 Paul M. Antoine
8  * Copyright (C) 1998 Ulf Carlsson
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
12  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
13  * Copyright (C) 2014, Imagination Technologies Ltd.
14  */
15 #include <linux/bug.h>
16 #include <linux/compiler.h>
17 #include <linux/context_tracking.h>
18 #include <linux/cpu_pm.h>
19 #include <linux/kexec.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/mm.h>
24 #include <linux/sched.h>
25 #include <linux/smp.h>
26 #include <linux/spinlock.h>
27 #include <linux/kallsyms.h>
28 #include <linux/bootmem.h>
29 #include <linux/interrupt.h>
30 #include <linux/ptrace.h>
31 #include <linux/kgdb.h>
32 #include <linux/kdebug.h>
33 #include <linux/kprobes.h>
34 #include <linux/notifier.h>
35 #include <linux/kdb.h>
36 #include <linux/irq.h>
37 #include <linux/perf_event.h>
38 
39 #include <asm/bootinfo.h>
40 #include <asm/branch.h>
41 #include <asm/break.h>
42 #include <asm/cop2.h>
43 #include <asm/cpu.h>
44 #include <asm/cpu-type.h>
45 #include <asm/dsp.h>
46 #include <asm/fpu.h>
47 #include <asm/fpu_emulator.h>
48 #include <asm/idle.h>
49 #include <asm/mips-r2-to-r6-emul.h>
50 #include <asm/mipsregs.h>
51 #include <asm/mipsmtregs.h>
52 #include <asm/module.h>
53 #include <asm/msa.h>
54 #include <asm/pgtable.h>
55 #include <asm/ptrace.h>
56 #include <asm/sections.h>
57 #include <asm/tlbdebug.h>
58 #include <asm/traps.h>
59 #include <asm/uaccess.h>
60 #include <asm/watch.h>
61 #include <asm/mmu_context.h>
62 #include <asm/types.h>
63 #include <asm/stacktrace.h>
64 #include <asm/uasm.h>
65 
66 extern void check_wait(void);
67 extern asmlinkage void rollback_handle_int(void);
68 extern asmlinkage void handle_int(void);
69 extern u32 handle_tlbl[];
70 extern u32 handle_tlbs[];
71 extern u32 handle_tlbm[];
72 extern asmlinkage void handle_adel(void);
73 extern asmlinkage void handle_ades(void);
74 extern asmlinkage void handle_ibe(void);
75 extern asmlinkage void handle_dbe(void);
76 extern asmlinkage void handle_sys(void);
77 extern asmlinkage void handle_bp(void);
78 extern asmlinkage void handle_ri(void);
79 extern asmlinkage void handle_ri_rdhwr_vivt(void);
80 extern asmlinkage void handle_ri_rdhwr(void);
81 extern asmlinkage void handle_cpu(void);
82 extern asmlinkage void handle_ov(void);
83 extern asmlinkage void handle_tr(void);
84 extern asmlinkage void handle_msa_fpe(void);
85 extern asmlinkage void handle_fpe(void);
86 extern asmlinkage void handle_ftlb(void);
87 extern asmlinkage void handle_msa(void);
88 extern asmlinkage void handle_mdmx(void);
89 extern asmlinkage void handle_watch(void);
90 extern asmlinkage void handle_mt(void);
91 extern asmlinkage void handle_dsp(void);
92 extern asmlinkage void handle_mcheck(void);
93 extern asmlinkage void handle_reserved(void);
94 extern void tlb_do_page_fault_0(void);
95 
96 void (*board_be_init)(void);
97 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
98 void (*board_nmi_handler_setup)(void);
99 void (*board_ejtag_handler_setup)(void);
100 void (*board_bind_eic_interrupt)(int irq, int regset);
101 void (*board_ebase_setup)(void);
102 void(*board_cache_error_setup)(void);
103 
104 static void show_raw_backtrace(unsigned long reg29)
105 {
106 	unsigned long *sp = (unsigned long *)(reg29 & ~3);
107 	unsigned long addr;
108 
109 	printk("Call Trace:");
110 #ifdef CONFIG_KALLSYMS
111 	printk("\n");
112 #endif
113 	while (!kstack_end(sp)) {
114 		unsigned long __user *p =
115 			(unsigned long __user *)(unsigned long)sp++;
116 		if (__get_user(addr, p)) {
117 			printk(" (Bad stack address)");
118 			break;
119 		}
120 		if (__kernel_text_address(addr))
121 			print_ip_sym(addr);
122 	}
123 	printk("\n");
124 }
125 
126 #ifdef CONFIG_KALLSYMS
127 int raw_show_trace;
128 static int __init set_raw_show_trace(char *str)
129 {
130 	raw_show_trace = 1;
131 	return 1;
132 }
133 __setup("raw_show_trace", set_raw_show_trace);
134 #endif
135 
136 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
137 {
138 	unsigned long sp = regs->regs[29];
139 	unsigned long ra = regs->regs[31];
140 	unsigned long pc = regs->cp0_epc;
141 
142 	if (!task)
143 		task = current;
144 
145 	if (raw_show_trace || !__kernel_text_address(pc)) {
146 		show_raw_backtrace(sp);
147 		return;
148 	}
149 	printk("Call Trace:\n");
150 	do {
151 		print_ip_sym(pc);
152 		pc = unwind_stack(task, &sp, pc, &ra);
153 	} while (pc);
154 	printk("\n");
155 }
156 
157 /*
158  * This routine abuses get_user()/put_user() to reference pointers
159  * with at least a bit of error checking ...
160  */
161 static void show_stacktrace(struct task_struct *task,
162 	const struct pt_regs *regs)
163 {
164 	const int field = 2 * sizeof(unsigned long);
165 	long stackdata;
166 	int i;
167 	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
168 
169 	printk("Stack :");
170 	i = 0;
171 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
172 		if (i && ((i % (64 / field)) == 0))
173 			printk("\n	 ");
174 		if (i > 39) {
175 			printk(" ...");
176 			break;
177 		}
178 
179 		if (__get_user(stackdata, sp++)) {
180 			printk(" (Bad stack address)");
181 			break;
182 		}
183 
184 		printk(" %0*lx", field, stackdata);
185 		i++;
186 	}
187 	printk("\n");
188 	show_backtrace(task, regs);
189 }
190 
191 void show_stack(struct task_struct *task, unsigned long *sp)
192 {
193 	struct pt_regs regs;
194 	if (sp) {
195 		regs.regs[29] = (unsigned long)sp;
196 		regs.regs[31] = 0;
197 		regs.cp0_epc = 0;
198 	} else {
199 		if (task && task != current) {
200 			regs.regs[29] = task->thread.reg29;
201 			regs.regs[31] = 0;
202 			regs.cp0_epc = task->thread.reg31;
203 #ifdef CONFIG_KGDB_KDB
204 		} else if (atomic_read(&kgdb_active) != -1 &&
205 			   kdb_current_regs) {
206 			memcpy(&regs, kdb_current_regs, sizeof(regs));
207 #endif /* CONFIG_KGDB_KDB */
208 		} else {
209 			prepare_frametrace(&regs);
210 		}
211 	}
212 	show_stacktrace(task, &regs);
213 }
214 
215 static void show_code(unsigned int __user *pc)
216 {
217 	long i;
218 	unsigned short __user *pc16 = NULL;
219 
220 	printk("\nCode:");
221 
222 	if ((unsigned long)pc & 1)
223 		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
224 	for(i = -3 ; i < 6 ; i++) {
225 		unsigned int insn;
226 		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
227 			printk(" (Bad address in epc)\n");
228 			break;
229 		}
230 		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
231 	}
232 }
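/*
 * Annotation (illustrative, not part of the source): show_code() dumps
 * the three instructions before the faulting one and the five after it,
 * bracketing the instruction at EPC, e.g. (made-up encodings):
 *
 *   Code: 27bdffe0  afbf001c  0c1003a4 <8c820000> 00000000 ...
 *
 * When EPC has the ISA bit set, 16-bit halfwords are printed with
 * width 4 instead of 8.
 */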
233 
234 static void __show_regs(const struct pt_regs *regs)
235 {
236 	const int field = 2 * sizeof(unsigned long);
237 	unsigned int cause = regs->cp0_cause;
238 	int i;
239 
240 	show_regs_print_info(KERN_DEFAULT);
241 
242 	/*
243 	 * Saved main processor registers
244 	 */
245 	for (i = 0; i < 32; ) {
246 		if ((i % 4) == 0)
247 			printk("$%2d   :", i);
248 		if (i == 0)
249 			printk(" %0*lx", field, 0UL);
250 		else if (i == 26 || i == 27)
251 			printk(" %*s", field, "");
252 		else
253 			printk(" %0*lx", field, regs->regs[i]);
254 
255 		i++;
256 		if ((i % 4) == 0)
257 			printk("\n");
258 	}
259 
260 #ifdef CONFIG_CPU_HAS_SMARTMIPS
261 	printk("Acx    : %0*lx\n", field, regs->acx);
262 #endif
263 	printk("Hi    : %0*lx\n", field, regs->hi);
264 	printk("Lo    : %0*lx\n", field, regs->lo);
265 
266 	/*
267 	 * Saved cp0 registers
268 	 */
269 	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
270 	       (void *) regs->cp0_epc);
271 	printk("    %s\n", print_tainted());
272 	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
273 	       (void *) regs->regs[31]);
274 
275 	printk("Status: %08x	", (uint32_t) regs->cp0_status);
276 
277 	if (cpu_has_3kex) {
278 		if (regs->cp0_status & ST0_KUO)
279 			printk("KUo ");
280 		if (regs->cp0_status & ST0_IEO)
281 			printk("IEo ");
282 		if (regs->cp0_status & ST0_KUP)
283 			printk("KUp ");
284 		if (regs->cp0_status & ST0_IEP)
285 			printk("IEp ");
286 		if (regs->cp0_status & ST0_KUC)
287 			printk("KUc ");
288 		if (regs->cp0_status & ST0_IEC)
289 			printk("IEc ");
290 	} else if (cpu_has_4kex) {
291 		if (regs->cp0_status & ST0_KX)
292 			printk("KX ");
293 		if (regs->cp0_status & ST0_SX)
294 			printk("SX ");
295 		if (regs->cp0_status & ST0_UX)
296 			printk("UX ");
297 		switch (regs->cp0_status & ST0_KSU) {
298 		case KSU_USER:
299 			printk("USER ");
300 			break;
301 		case KSU_SUPERVISOR:
302 			printk("SUPERVISOR ");
303 			break;
304 		case KSU_KERNEL:
305 			printk("KERNEL ");
306 			break;
307 		default:
308 			printk("BAD_MODE ");
309 			break;
310 		}
311 		if (regs->cp0_status & ST0_ERL)
312 			printk("ERL ");
313 		if (regs->cp0_status & ST0_EXL)
314 			printk("EXL ");
315 		if (regs->cp0_status & ST0_IE)
316 			printk("IE ");
317 	}
318 	printk("\n");
319 
320 	printk("Cause : %08x\n", cause);
321 
322 	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
323 	if (1 <= cause && cause <= 5)
324 		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
325 
326 	printk("PrId  : %08x (%s)\n", read_c0_prid(),
327 	       cpu_name_string());
328 }
329 
330 /*
331  * FIXME: really the generic show_regs should take a const pointer argument.
332  */
333 void show_regs(struct pt_regs *regs)
334 {
335 	__show_regs((struct pt_regs *)regs);
336 }
337 
338 void show_registers(struct pt_regs *regs)
339 {
340 	const int field = 2 * sizeof(unsigned long);
341 	mm_segment_t old_fs = get_fs();
342 
343 	__show_regs(regs);
344 	print_modules();
345 	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
346 	       current->comm, current->pid, current_thread_info(), current,
347 	      field, current_thread_info()->tp_value);
348 	if (cpu_has_userlocal) {
349 		unsigned long tls;
350 
351 		tls = read_c0_userlocal();
352 		if (tls != current_thread_info()->tp_value)
353 			printk("*HwTLS: %0*lx\n", field, tls);
354 	}
355 
356 	if (!user_mode(regs))
357 		/* Necessary for getting the correct stack content */
358 		set_fs(KERNEL_DS);
359 	show_stacktrace(current, regs);
360 	show_code((unsigned int __user *) regs->cp0_epc);
361 	printk("\n");
362 	set_fs(old_fs);
363 }
364 
365 static int regs_to_trapnr(struct pt_regs *regs)
366 {
367 	return (regs->cp0_cause >> 2) & 0x1f;
368 }
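/*
 * Annotation (hedged): the ExcCode field occupies Cause bits 6..2, so
 * this open-coded shift-and-mask is equivalent to the
 * (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE form used in __show_regs()
 * above.
 */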
369 
370 static DEFINE_RAW_SPINLOCK(die_lock);
371 
372 void __noreturn die(const char *str, struct pt_regs *regs)
373 {
374 	static int die_counter;
375 	int sig = SIGSEGV;
376 
377 	oops_enter();
378 
379 	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
380 		       SIGSEGV) == NOTIFY_STOP)
381 		sig = 0;
382 
383 	console_verbose();
384 	raw_spin_lock_irq(&die_lock);
385 	bust_spinlocks(1);
386 
387 	printk("%s[#%d]:\n", str, ++die_counter);
388 	show_registers(regs);
389 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
390 	raw_spin_unlock_irq(&die_lock);
391 
392 	oops_exit();
393 
394 	if (in_interrupt())
395 		panic("Fatal exception in interrupt");
396 
397 	if (panic_on_oops) {
398 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
399 		ssleep(5);
400 		panic("Fatal exception");
401 	}
402 
403 	if (regs && kexec_should_crash(current))
404 		crash_kexec(regs);
405 
406 	do_exit(sig);
407 }
408 
409 extern struct exception_table_entry __start___dbe_table[];
410 extern struct exception_table_entry __stop___dbe_table[];
411 
412 __asm__(
413 "	.section	__dbe_table, \"a\"\n"
414 "	.previous			\n");
415 
416 /* Given an address, look for it in the exception tables. */
417 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
418 {
419 	const struct exception_table_entry *e;
420 
421 	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
422 	if (!e)
423 		e = search_module_dbetables(addr);
424 	return e;
425 }
426 
427 asmlinkage void do_be(struct pt_regs *regs)
428 {
429 	const int field = 2 * sizeof(unsigned long);
430 	const struct exception_table_entry *fixup = NULL;
431 	int data = regs->cp0_cause & 4;
432 	int action = MIPS_BE_FATAL;
433 	enum ctx_state prev_state;
434 
435 	prev_state = exception_enter();
436 	/* XXX For now.	 Fixme, this searches the wrong table ...  */
437 	if (data && !user_mode(regs))
438 		fixup = search_dbe_tables(exception_epc(regs));
439 
440 	if (fixup)
441 		action = MIPS_BE_FIXUP;
442 
443 	if (board_be_handler)
444 		action = board_be_handler(regs, fixup != NULL);
445 
446 	switch (action) {
447 	case MIPS_BE_DISCARD:
448 		goto out;
449 	case MIPS_BE_FIXUP:
450 		if (fixup) {
451 			regs->cp0_epc = fixup->nextinsn;
452 			goto out;
453 		}
454 		break;
455 	default:
456 		break;
457 	}
458 
459 	/*
460 	 * Assume it would be too dangerous to continue ...
461 	 */
462 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
463 	       data ? "Data" : "Instruction",
464 	       field, regs->cp0_epc, field, regs->regs[31]);
465 	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
466 		       SIGBUS) == NOTIFY_STOP)
467 		goto out;
468 
469 	die_if_kernel("Oops", regs);
470 	force_sig(SIGBUS, current);
471 
472 out:
473 	exception_exit(prev_state);
474 }
475 
476 /*
477  * ll/sc, rdhwr, sync emulation
478  */
479 
480 #define OPCODE 0xfc000000
481 #define BASE   0x03e00000
482 #define RT     0x001f0000
483 #define OFFSET 0x0000ffff
484 #define LL     0xc0000000
485 #define SC     0xe0000000
486 #define SPEC0  0x00000000
487 #define SPEC3  0x7c000000
488 #define RD     0x0000f800
489 #define FUNC   0x0000003f
490 #define SYNC   0x0000000f
491 #define RDHWR  0x0000003b
492 
493 /*  microMIPS definitions   */
494 #define MM_POOL32A_FUNC 0xfc00ffff
495 #define MM_RDHWR        0x00006b3c
496 #define MM_RS           0x001f0000
497 #define MM_RT           0x03e00000
498 
499 /*
500  * The ll_bit is cleared by r*_switch.S
501  */
502 
503 unsigned int ll_bit;
504 struct task_struct *ll_task;
505 
506 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
507 {
508 	unsigned long value, __user *vaddr;
509 	long offset;
510 
511 	/*
512 	 * Analyse the ll instruction that just caused an RI exception
513 	 * and compute the virtual address it references.
514 	 */
515 
516 	/* sign extend offset */
517 	offset = opcode & OFFSET;
518 	offset <<= 16;
519 	offset >>= 16;
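	/*
	 * Worked example (annotation): for a 16-bit immediate of 0xfffc,
	 * "offset" first holds 0x0000fffc; shifting left then
	 * arithmetically right by 16 (offset is a signed long) yields
	 * -4, i.e. the immediate sign-extended to register width.
	 */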
520 
521 	vaddr = (unsigned long __user *)
522 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
523 
524 	if ((unsigned long)vaddr & 3)
525 		return SIGBUS;
526 	if (get_user(value, vaddr))
527 		return SIGSEGV;
528 
529 	preempt_disable();
530 
531 	if (ll_task == NULL || ll_task == current) {
532 		ll_bit = 1;
533 	} else {
534 		ll_bit = 0;
535 	}
536 	ll_task = current;
537 
538 	preempt_enable();
539 
540 	regs->regs[(opcode & RT) >> 16] = value;
541 
542 	return 0;
543 }
544 
545 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
546 {
547 	unsigned long __user *vaddr;
548 	unsigned long reg;
549 	long offset;
550 
551 	/*
552 	 * Analyse the sc instruction that just caused an RI exception
553 	 * and compute the virtual address it references.
554 	 */
555 
556 	/* sign extend offset */
557 	offset = opcode & OFFSET;
558 	offset <<= 16;
559 	offset >>= 16;
560 
561 	vaddr = (unsigned long __user *)
562 		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
563 	reg = (opcode & RT) >> 16;
564 
565 	if ((unsigned long)vaddr & 3)
566 		return SIGBUS;
567 
568 	preempt_disable();
569 
570 	if (ll_bit == 0 || ll_task != current) {
571 		regs->regs[reg] = 0;
572 		preempt_enable();
573 		return 0;
574 	}
575 
576 	preempt_enable();
577 
578 	if (put_user(regs->regs[reg], vaddr))
579 		return SIGSEGV;
580 
581 	regs->regs[reg] = 1;
582 
583 	return 0;
584 }
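/*
 * Annotation (illustrative, not from the source): the userland pattern
 * these two helpers emulate on ll/sc-less CPUs is the usual
 * load-linked/store-conditional retry loop, e.g. an atomic increment:
 *
 *   1:	ll	t0, 0(a0)	# simulate_ll: load, record ll_bit/ll_task
 *	addiu	t0, t0, 1
 *	sc	t0, 0(a0)	# simulate_sc: store only if ll_bit survived
 *	beqz	t0, 1b		# retry if the "link" was broken
 *
 * A context switch between the emulated ll and sc clears ll_bit (see
 * the r*_switch.S note above), so the sc is made to fail and the loop
 * retries, preserving atomicity.
 */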
585 
586 /*
587  * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is,
588  * both opcodes are supposed to result in coprocessor unusable exceptions
589  * if executed on ll/sc-less processors.  That's the theory.  In practice
590  * a few processors, such as NEC's VR4100, throw reserved instruction
591  * exceptions instead, so we do the emulation in both exception handlers.
592  */
593 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
594 {
595 	if ((opcode & OPCODE) == LL) {
596 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
597 				1, regs, 0);
598 		return simulate_ll(regs, opcode);
599 	}
600 	if ((opcode & OPCODE) == SC) {
601 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
602 				1, regs, 0);
603 		return simulate_sc(regs, opcode);
604 	}
605 
606 	return -1;			/* Must be something else ... */
607 }
608 
609 /*
610  * Simulate trapping 'rdhwr' instructions to provide user-accessible
611  * registers not implemented in hardware.
612  */
613 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
614 {
615 	struct thread_info *ti = task_thread_info(current);
616 
617 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
618 			1, regs, 0);
619 	switch (rd) {
620 	case 0:		/* CPU number */
621 		regs->regs[rt] = smp_processor_id();
622 		return 0;
623 	case 1:		/* SYNCI length */
624 		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
625 				     current_cpu_data.icache.linesz);
626 		return 0;
627 	case 2:		/* Read count register */
628 		regs->regs[rt] = read_c0_count();
629 		return 0;
630 	case 3:		/* Count register resolution */
631 		switch (current_cpu_type()) {
632 		case CPU_20KC:
633 		case CPU_25KF:
634 			regs->regs[rt] = 1;
635 			break;
636 		default:
637 			regs->regs[rt] = 2;
638 		}
639 		return 0;
640 	case 29:
641 		regs->regs[rt] = ti->tp_value;
642 		return 0;
643 	default:
644 		return -1;
645 	}
646 }
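/*
 * Annotation (illustrative): the common user of case 29 above is TLS.
 * Code generated for __thread variables typically reads hardware
 * register $29, e.g. "rdhwr v1, $29"; on cores without RDHWR (or with
 * it disabled in HWREna) this traps and do_ri() or do_cpu() lands
 * here, returning thread_info->tp_value instead.
 */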
647 
648 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
649 {
650 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
651 		int rd = (opcode & RD) >> 11;
652 		int rt = (opcode & RT) >> 16;
653 
654 		simulate_rdhwr(regs, rd, rt);
655 		return 0;
656 	}
657 
658 	/* Not ours.  */
659 	return -1;
660 }
661 
662 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
663 {
664 	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
665 		int rd = (opcode & MM_RS) >> 16;
666 		int rt = (opcode & MM_RT) >> 21;
667 		simulate_rdhwr(regs, rd, rt);
668 		return 0;
669 	}
670 
671 	/* Not ours.  */
672 	return -1;
673 }
674 
675 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
676 {
677 	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
678 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
679 				1, regs, 0);
680 		return 0;
681 	}
682 
683 	return -1;			/* Must be something else ... */
684 }
685 
686 asmlinkage void do_ov(struct pt_regs *regs)
687 {
688 	enum ctx_state prev_state;
689 	siginfo_t info;
690 
691 	prev_state = exception_enter();
692 	die_if_kernel("Integer overflow", regs);
693 
694 	info.si_code = FPE_INTOVF;
695 	info.si_signo = SIGFPE;
696 	info.si_errno = 0;
697 	info.si_addr = (void __user *) regs->cp0_epc;
698 	force_sig_info(SIGFPE, &info, current);
699 	exception_exit(prev_state);
700 }
701 
702 int process_fpemu_return(int sig, void __user *fault_addr)
703 {
704 	if (sig == SIGSEGV || sig == SIGBUS) {
705 		struct siginfo si = {0};
706 		si.si_addr = fault_addr;
707 		si.si_signo = sig;
708 		if (sig == SIGSEGV) {
709 			down_read(&current->mm->mmap_sem);
710 			if (find_vma(current->mm, (unsigned long)fault_addr))
711 				si.si_code = SEGV_ACCERR;
712 			else
713 				si.si_code = SEGV_MAPERR;
714 			up_read(&current->mm->mmap_sem);
715 		} else {
716 			si.si_code = BUS_ADRERR;
717 		}
718 		force_sig_info(sig, &si, current);
719 		return 1;
720 	} else if (sig) {
721 		force_sig(sig, current);
722 		return 1;
723 	} else {
724 		return 0;
725 	}
726 }
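/*
 * Annotation (hedged): a non-zero return means a signal was delivered
 * to the task; callers such as do_cpu() below use this to decide
 * whether to continue post-emulation work (e.g. mt_ase_fp_affinity()
 * is only applied when no signal was raised).
 */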
727 
728 static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
729 		       unsigned long old_epc, unsigned long old_ra)
730 {
731 	union mips_instruction inst = { .word = opcode };
732 	void __user *fault_addr = NULL;
733 	int sig;
734 
735 	/* If it's obviously not an FP instruction, skip it */
736 	switch (inst.i_format.opcode) {
737 	case cop1_op:
738 	case cop1x_op:
739 	case lwc1_op:
740 	case ldc1_op:
741 	case swc1_op:
742 	case sdc1_op:
743 		break;
744 
745 	default:
746 		return -1;
747 	}
748 
749 	/*
750 	 * do_ri skipped over the instruction via compute_return_epc, undo
751 	 * that for the FPU emulator.
752 	 */
753 	regs->cp0_epc = old_epc;
754 	regs->regs[31] = old_ra;
755 
756 	/* Save the FP context to struct thread_struct */
757 	lose_fpu(1);
758 
759 	/* Run the emulator */
760 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
761 				       &fault_addr);
762 
763 	/* If something went wrong, signal */
764 	process_fpemu_return(sig, fault_addr);
765 
766 	/* Restore the hardware register state */
767 	own_fpu(1);
768 
769 	return 0;
770 }
771 
772 /*
773  * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
774  */
775 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
776 {
777 	enum ctx_state prev_state;
778 	siginfo_t info = {0};
779 
780 	prev_state = exception_enter();
781 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
782 		       SIGFPE) == NOTIFY_STOP)
783 		goto out;
784 	die_if_kernel("FP exception in kernel code", regs);
785 
786 	if (fcr31 & FPU_CSR_UNI_X) {
787 		int sig;
788 		void __user *fault_addr = NULL;
789 
790 		/*
791 		 * Unimplemented operation exception.  If we've got the full
792 		 * software emulator on-board, let's use it...
793 		 *
794 		 * Force FPU to dump state into task/thread context.  We're
795 		 * moving a lot of data here for what is probably a single
796 		 * instruction, but the alternative is to pre-decode the FP
797 		 * register operands before invoking the emulator, which seems
798 		 * a bit extreme for what should be an infrequent event.
799 		 */
800 		/* Ensure 'resume' does not overwrite the saved FP context. */
801 		lose_fpu(1);
802 
803 		/* Run the emulator */
804 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
805 					       &fault_addr);
806 
807 		/*
808 		 * We can't allow the emulated instruction to leave any of
809 		 * the cause bits set in $fcr31.
810 		 */
811 		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
812 
813 		/* Restore the hardware register state */
814 		own_fpu(1);	/* Using the FPU again.	 */
815 
816 		/* If something went wrong, signal */
817 		process_fpemu_return(sig, fault_addr);
818 
819 		goto out;
820 	} else if (fcr31 & FPU_CSR_INV_X)
821 		info.si_code = FPE_FLTINV;
822 	else if (fcr31 & FPU_CSR_DIV_X)
823 		info.si_code = FPE_FLTDIV;
824 	else if (fcr31 & FPU_CSR_OVF_X)
825 		info.si_code = FPE_FLTOVF;
826 	else if (fcr31 & FPU_CSR_UDF_X)
827 		info.si_code = FPE_FLTUND;
828 	else if (fcr31 & FPU_CSR_INE_X)
829 		info.si_code = FPE_FLTRES;
830 	else
831 		info.si_code = __SI_FAULT;
832 	info.si_signo = SIGFPE;
833 	info.si_errno = 0;
834 	info.si_addr = (void __user *) regs->cp0_epc;
835 	force_sig_info(SIGFPE, &info, current);
836 
837 out:
838 	exception_exit(prev_state);
839 }
840 
841 void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
842 	const char *str)
843 {
844 	siginfo_t info;
845 	char b[40];
846 
847 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
848 	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
849 		return;
850 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
851 
852 	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
853 		       SIGTRAP) == NOTIFY_STOP)
854 		return;
855 
856 	/*
857 	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
858 	 * insns, even for trap and break codes that indicate arithmetic
859 	 * failures.  Weird ...
860 	 * But should we continue the brokenness???  --macro
861 	 */
862 	switch (code) {
863 	case BRK_OVERFLOW:
864 	case BRK_DIVZERO:
865 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
866 		die_if_kernel(b, regs);
867 		if (code == BRK_DIVZERO)
868 			info.si_code = FPE_INTDIV;
869 		else
870 			info.si_code = FPE_INTOVF;
871 		info.si_signo = SIGFPE;
872 		info.si_errno = 0;
873 		info.si_addr = (void __user *) regs->cp0_epc;
874 		force_sig_info(SIGFPE, &info, current);
875 		break;
876 	case BRK_BUG:
877 		die_if_kernel("Kernel bug detected", regs);
878 		force_sig(SIGTRAP, current);
879 		break;
880 	case BRK_MEMU:
881 		/*
882 		 * Address errors may be deliberately induced by the FPU
883 		 * emulator to retake control of the CPU after executing the
884 		 * instruction in the delay slot of an emulated branch.
885 		 *
886 		 * Terminate if the exception was recognized as a delay slot
887 		 * return; otherwise handle as normal.
888 		 */
889 		if (do_dsemulret(regs))
890 			return;
891 
892 		die_if_kernel("Math emu break/trap", regs);
893 		force_sig(SIGTRAP, current);
894 		break;
895 	default:
896 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
897 		die_if_kernel(b, regs);
898 		force_sig(SIGTRAP, current);
899 	}
900 }
901 
902 asmlinkage void do_bp(struct pt_regs *regs)
903 {
904 	unsigned int opcode, bcode;
905 	enum ctx_state prev_state;
906 	unsigned long epc;
907 	u16 instr[2];
908 	mm_segment_t seg;
909 
910 	seg = get_fs();
911 	if (!user_mode(regs))
912 		set_fs(KERNEL_DS);
913 
914 	prev_state = exception_enter();
915 	if (get_isa16_mode(regs->cp0_epc)) {
916 		/* Calculate EPC. */
917 		epc = exception_epc(regs);
918 		if (cpu_has_mmips) {
919 			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
920 			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
921 				goto out_sigsegv;
922 			opcode = (instr[0] << 16) | instr[1];
923 		} else {
924 			/* MIPS16e mode */
925 			if (__get_user(instr[0],
926 				       (u16 __user *)msk_isa16_mode(epc)))
927 				goto out_sigsegv;
928 			bcode = (instr[0] >> 6) & 0x3f;
929 			do_trap_or_bp(regs, bcode, "Break");
930 			goto out;
931 		}
932 	} else {
933 		if (__get_user(opcode,
934 			       (unsigned int __user *) exception_epc(regs)))
935 			goto out_sigsegv;
936 	}
937 
938 	/*
939 	 * There is an ancient bug in MIPS assemblers: the break code
940 	 * starts at bit 16 instead of bit 6 in the opcode.
941 	 * Gas is bug-compatible, but not always, grrr...
942 	 * We handle both cases with a simple heuristic.  --macro
943 	 */
944 	bcode = ((opcode >> 6) & ((1 << 20) - 1));
945 	if (bcode >= (1 << 10))
946 		bcode >>= 10;
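	/*
	 * Worked example (annotation): a correctly assembled "break 7"
	 * encodes 7 starting at bit 6, so bcode == 7 here and is used
	 * as-is; an old-assembler "break 7" encodes 7 starting at bit
	 * 16, so bcode == 7 << 10 >= 1 << 10 and the shift recovers 7.
	 */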
947 
948 	/*
949 	 * Notify the kprobe handlers if the instruction is likely to
950 	 * pertain to them.
951 	 */
952 	switch (bcode) {
953 	case BRK_KPROBE_BP:
954 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
955 			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
956 			goto out;
957 		else
958 			break;
959 	case BRK_KPROBE_SSTEPBP:
960 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
961 			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
962 			goto out;
963 		else
964 			break;
965 	default:
966 		break;
967 	}
968 
969 	do_trap_or_bp(regs, bcode, "Break");
970 
971 out:
972 	set_fs(seg);
973 	exception_exit(prev_state);
974 	return;
975 
976 out_sigsegv:
977 	force_sig(SIGSEGV, current);
978 	goto out;
979 }
980 
981 asmlinkage void do_tr(struct pt_regs *regs)
982 {
983 	u32 opcode, tcode = 0;
984 	enum ctx_state prev_state;
985 	u16 instr[2];
986 	mm_segment_t seg;
987 	unsigned long epc = msk_isa16_mode(exception_epc(regs));
988 
989 	seg = get_fs();
990 	if (!user_mode(regs))
991 		set_fs(get_ds());
992 
993 	prev_state = exception_enter();
994 	if (get_isa16_mode(regs->cp0_epc)) {
995 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
996 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
997 			goto out_sigsegv;
998 		opcode = (instr[0] << 16) | instr[1];
999 		/* Immediate versions don't provide a code.  */
1000 		if (!(opcode & OPCODE))
1001 			tcode = (opcode >> 12) & ((1 << 4) - 1);
1002 	} else {
1003 		if (__get_user(opcode, (u32 __user *)epc))
1004 			goto out_sigsegv;
1005 		/* Immediate versions don't provide a code.  */
1006 		if (!(opcode & OPCODE))
1007 			tcode = (opcode >> 6) & ((1 << 10) - 1);
1008 	}
1009 
1010 	do_trap_or_bp(regs, tcode, "Trap");
1011 
1012 out:
1013 	set_fs(seg);
1014 	exception_exit(prev_state);
1015 	return;
1016 
1017 out_sigsegv:
1018 	force_sig(SIGSEGV, current);
1019 	goto out;
1020 }
1021 
1022 asmlinkage void do_ri(struct pt_regs *regs)
1023 {
1024 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1025 	unsigned long old_epc = regs->cp0_epc;
1026 	unsigned long old31 = regs->regs[31];
1027 	enum ctx_state prev_state;
1028 	unsigned int opcode = 0;
1029 	int status = -1;
1030 
1031 	/*
1032 	 * Avoid any kernel code. Just emulate the R2 instruction
1033 	 * as quickly as possible.
1034 	 */
1035 	if (mipsr2_emulation && cpu_has_mips_r6 &&
1036 	    likely(user_mode(regs))) {
1037 		if (likely(get_user(opcode, epc) >= 0)) {
1038 			status = mipsr2_decoder(regs, opcode);
1039 			switch (status) {
1040 			case 0:
1041 			case SIGEMT:
1042 				task_thread_info(current)->r2_emul_return = 1;
1043 				return;
1044 			case SIGILL:
1045 				goto no_r2_instr;
1046 			default:
1047 				process_fpemu_return(status,
1048 						     &current->thread.cp0_baduaddr);
1049 				task_thread_info(current)->r2_emul_return = 1;
1050 				return;
1051 			}
1052 		}
1053 	}
1054 
1055 no_r2_instr:
1056 
1057 	prev_state = exception_enter();
1058 
1059 	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
1060 		       SIGILL) == NOTIFY_STOP)
1061 		goto out;
1062 
1063 	die_if_kernel("Reserved instruction in kernel code", regs);
1064 
1065 	if (unlikely(compute_return_epc(regs) < 0))
1066 		goto out;
1067 
1068 	if (get_isa16_mode(regs->cp0_epc)) {
1069 		unsigned short mmop[2] = { 0 };
1070 
1071 		if (unlikely(get_user(mmop[0], epc) < 0))
1072 			status = SIGSEGV;
1073 		if (unlikely(get_user(mmop[1], epc) < 0))
1074 			status = SIGSEGV;
1075 		opcode = (mmop[0] << 16) | mmop[1];
1076 
1077 		if (status < 0)
1078 			status = simulate_rdhwr_mm(regs, opcode);
1079 	} else {
1080 		if (unlikely(get_user(opcode, epc) < 0))
1081 			status = SIGSEGV;
1082 
1083 		if (!cpu_has_llsc && status < 0)
1084 			status = simulate_llsc(regs, opcode);
1085 
1086 		if (status < 0)
1087 			status = simulate_rdhwr_normal(regs, opcode);
1088 
1089 		if (status < 0)
1090 			status = simulate_sync(regs, opcode);
1091 
1092 		if (status < 0)
1093 			status = simulate_fp(regs, opcode, old_epc, old31);
1094 	}
1095 
1096 	if (status < 0)
1097 		status = SIGILL;
1098 
1099 	if (unlikely(status > 0)) {
1100 		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1101 		regs->regs[31] = old31;
1102 		force_sig(status, current);
1103 	}
1104 
1105 out:
1106 	exception_exit(prev_state);
1107 }
1108 
1109 /*
1110  * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1111  * emulated more than some threshold number of instructions, force migration to
1112  * a "CPU" that has FP support.
1113  */
1114 static void mt_ase_fp_affinity(void)
1115 {
1116 #ifdef CONFIG_MIPS_MT_FPAFF
1117 	if (mt_fpemul_threshold > 0 &&
1118 	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1119 		/*
1120 		 * If there's no FPU present, or if the application has already
1121 		 * restricted the allowed set to exclude any CPUs with FPUs,
1122 		 * we'll skip the procedure.
1123 		 */
1124 		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
1125 			cpumask_t tmask;
1126 
1127 			current->thread.user_cpus_allowed
1128 				= current->cpus_allowed;
1129 			cpus_and(tmask, current->cpus_allowed,
1130 				mt_fpu_cpumask);
1131 			set_cpus_allowed_ptr(current, &tmask);
1132 			set_thread_flag(TIF_FPUBOUND);
1133 		}
1134 	}
1135 #endif /* CONFIG_MIPS_MT_FPAFF */
1136 }
1137 
1138 /*
1139  * No lock; only written during early bootup by CPU 0.
1140  */
1141 static RAW_NOTIFIER_HEAD(cu2_chain);
1142 
1143 int __ref register_cu2_notifier(struct notifier_block *nb)
1144 {
1145 	return raw_notifier_chain_register(&cu2_chain, nb);
1146 }
1147 
1148 int cu2_notifier_call_chain(unsigned long val, void *v)
1149 {
1150 	return raw_notifier_call_chain(&cu2_chain, val, v);
1151 }
1152 
1153 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1154 	void *data)
1155 {
1156 	struct pt_regs *regs = data;
1157 
1158 	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1159 			      "instruction", regs);
1160 	force_sig(SIGILL, current);
1161 
1162 	return NOTIFY_OK;
1163 }
1164 
1165 static int wait_on_fp_mode_switch(atomic_t *p)
1166 {
1167 	/*
1168 	 * The FP mode for this task is currently being switched. That may
1169 	 * involve modifications to the format of this task's FP context,
1170 	 * which make it unsafe to proceed with execution for the moment.
1171 	 * Instead, schedule some other task.
1172 	 */
1173 	schedule();
1174 	return 0;
1175 }
1176 
1177 static int enable_restore_fp_context(int msa)
1178 {
1179 	int err, was_fpu_owner, prior_msa;
1180 
1181 	/*
1182 	 * If an FP mode switch is currently underway, wait for it to
1183 	 * complete before proceeding.
1184 	 */
1185 	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1186 			 wait_on_fp_mode_switch, TASK_KILLABLE);
1187 
1188 	if (!used_math()) {
1189 		/* First time FP context user. */
1190 		preempt_disable();
1191 		err = init_fpu();
1192 		if (msa && !err) {
1193 			enable_msa();
1194 			_init_msa_upper();
1195 			set_thread_flag(TIF_USEDMSA);
1196 			set_thread_flag(TIF_MSA_CTX_LIVE);
1197 		}
1198 		preempt_enable();
1199 		if (!err)
1200 			set_used_math();
1201 		return err;
1202 	}
1203 
1204 	/*
1205 	 * This task has formerly used the FP context.
1206 	 *
1207 	 * If this thread has no live MSA vector context then we can simply
1208 	 * restore the scalar FP context. If it has live MSA vector context
1209 	 * (that is, it has or may have used MSA since last performing a
1210 	 * function call) then we'll need to restore the vector context. This
1211 	 * applies even if we're currently only executing a scalar FP
1212 	 * instruction. This is because if we were to later execute an MSA
1213 	 * instruction then we'd either have to:
1214 	 *
1215 	 *  - Restore the vector context & clobber any registers modified by
1216 	 *    scalar FP instructions between now & then.
1217 	 *
1218 	 * or
1219 	 *
1220 	 *  - Not restore the vector context & lose the most significant bits
1221 	 *    of all vector registers.
1222 	 *
1223 	 * Neither of those options is acceptable. We cannot restore the least
1224 	 * significant bits of the registers now & only restore the most
1225 	 * significant bits later because the most significant bits of any
1226 	 * vector registers whose aliased FP register is modified now will have
1227 	 * been zeroed. We'd have no way to know that when restoring the vector
1228 	 * context & thus may load an outdated value for the most significant
1229 	 * bits of a vector register.
1230 	 */
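	/*
	 * Summary of the cases handled from here on (annotation):
	 *
	 *   msa  MSA ctx live   action
	 *   ---  ------------   -------------------------------------
	 *    0        no        own_fpu(1): restore scalar FP only
	 *    0       yes        fall through to the full MSA path
	 *    1        *         enable MSA, init or restore the vectors
	 */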
1231 	if (!msa && !thread_msa_context_live())
1232 		return own_fpu(1);
1233 
1234 	/*
1235 	 * This task is using or has previously used MSA. Thus we require
1236 	 * that Status.FR == 1.
1237 	 */
1238 	preempt_disable();
1239 	was_fpu_owner = is_fpu_owner();
1240 	err = own_fpu_inatomic(0);
1241 	if (err)
1242 		goto out;
1243 
1244 	enable_msa();
1245 	write_msa_csr(current->thread.fpu.msacsr);
1246 	set_thread_flag(TIF_USEDMSA);
1247 
1248 	/*
1249 	 * If this is the first time that the task is using MSA and it has
1250 	 * previously used scalar FP in this time slice then we already have
1251 	 * FP context which we shouldn't clobber. We do, however, need to
1252 	 * clear the upper 64b of each vector register so that this task has
1253 	 * no opportunity to see data left behind by another.
1254 	 */
1255 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1256 	if (!prior_msa && was_fpu_owner) {
1257 		_init_msa_upper();
1258 
1259 		goto out;
1260 	}
1261 
1262 	if (!prior_msa) {
1263 		/*
1264 		 * Restore the least significant 64b of each vector register
1265 		 * from the existing scalar FP context.
1266 		 */
1267 		_restore_fp(current);
1268 
1269 		/*
1270 		 * The task has not formerly used MSA, so clear the upper 64b
1271 		 * of each vector register such that it cannot see data left
1272 		 * behind by another task.
1273 		 */
1274 		_init_msa_upper();
1275 	} else {
1276 		/* We need to restore the vector context. */
1277 		restore_msa(current);
1278 
1279 		/* Restore the scalar FP control & status register */
1280 		if (!was_fpu_owner)
1281 			write_32bit_cp1_register(CP1_STATUS,
1282 						 current->thread.fpu.fcr31);
1283 	}
1284 
1285 out:
1286 	preempt_enable();
1287 
1288 	return 0;
1289 }
1290 
1291 asmlinkage void do_cpu(struct pt_regs *regs)
1292 {
1293 	enum ctx_state prev_state;
1294 	unsigned int __user *epc;
1295 	unsigned long old_epc, old31;
1296 	unsigned int opcode;
1297 	unsigned int cpid;
1298 	int status, err;
1299 	unsigned long __maybe_unused flags;
1300 
1301 	prev_state = exception_enter();
1302 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1303 
1304 	if (cpid != 2)
1305 		die_if_kernel("do_cpu invoked from kernel context!", regs);
1306 
1307 	switch (cpid) {
1308 	case 0:
1309 		epc = (unsigned int __user *)exception_epc(regs);
1310 		old_epc = regs->cp0_epc;
1311 		old31 = regs->regs[31];
1312 		opcode = 0;
1313 		status = -1;
1314 
1315 		if (unlikely(compute_return_epc(regs) < 0))
1316 			goto out;
1317 
1318 		if (get_isa16_mode(regs->cp0_epc)) {
1319 			unsigned short mmop[2] = { 0 };
1320 
1321 			if (unlikely(get_user(mmop[0], epc) < 0))
1322 				status = SIGSEGV;
1323 			if (unlikely(get_user(mmop[1], epc) < 0))
1324 				status = SIGSEGV;
1325 			opcode = (mmop[0] << 16) | mmop[1];
1326 
1327 			if (status < 0)
1328 				status = simulate_rdhwr_mm(regs, opcode);
1329 		} else {
1330 			if (unlikely(get_user(opcode, epc) < 0))
1331 				status = SIGSEGV;
1332 
1333 			if (!cpu_has_llsc && status < 0)
1334 				status = simulate_llsc(regs, opcode);
1335 
1336 			if (status < 0)
1337 				status = simulate_rdhwr_normal(regs, opcode);
1338 		}
1339 
1340 		if (status < 0)
1341 			status = SIGILL;
1342 
1343 		if (unlikely(status > 0)) {
1344 			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1345 			regs->regs[31] = old31;
1346 			force_sig(status, current);
1347 		}
1348 
1349 		goto out;
1350 
1351 	case 3:
1352 		/*
1353 		 * Old (MIPS I and MIPS II) processors will set this code
1354 		 * for COP1X opcode instructions that replaced the original
1355 		 * COP3 space.	We don't limit COP1 space instructions in
1356 		 * the emulator according to the CPU ISA, so we want to
1357 		 * treat COP1X instructions consistently regardless of which
1358 		 * code the CPU chose.	Therefore we redirect this trap to
1359 		 * the FP emulator too.
1360 		 *
1361 		 * Then some newer FPU-less processors use this code
1362 		 * erroneously too, so they are covered by this choice
1363 		 * as well.
1364 		 */
1365 		if (raw_cpu_has_fpu)
1366 			break;
1367 		/* Fall through.  */
1368 
1369 	case 1:
1370 		err = enable_restore_fp_context(0);
1371 
1372 		if (!raw_cpu_has_fpu || err) {
1373 			int sig;
1374 			void __user *fault_addr = NULL;
1375 			sig = fpu_emulator_cop1Handler(regs,
1376 						       &current->thread.fpu,
1377 						       0, &fault_addr);
1378 			if (!process_fpemu_return(sig, fault_addr) && !err)
1379 				mt_ase_fp_affinity();
1380 		}
1381 
1382 		goto out;
1383 
1384 	case 2:
1385 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1386 		goto out;
1387 	}
1388 
1389 	force_sig(SIGILL, current);
1390 
1391 out:
1392 	exception_exit(prev_state);
1393 }
1394 
1395 asmlinkage void do_msa_fpe(struct pt_regs *regs)
1396 {
1397 	enum ctx_state prev_state;
1398 
1399 	prev_state = exception_enter();
1400 	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1401 	force_sig(SIGFPE, current);
1402 	exception_exit(prev_state);
1403 }
1404 
1405 asmlinkage void do_msa(struct pt_regs *regs)
1406 {
1407 	enum ctx_state prev_state;
1408 	int err;
1409 
1410 	prev_state = exception_enter();
1411 
1412 	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1413 		force_sig(SIGILL, current);
1414 		goto out;
1415 	}
1416 
1417 	die_if_kernel("do_msa invoked from kernel context!", regs);
1418 
1419 	err = enable_restore_fp_context(1);
1420 	if (err)
1421 		force_sig(SIGILL, current);
1422 out:
1423 	exception_exit(prev_state);
1424 }
1425 
1426 asmlinkage void do_mdmx(struct pt_regs *regs)
1427 {
1428 	enum ctx_state prev_state;
1429 
1430 	prev_state = exception_enter();
1431 	force_sig(SIGILL, current);
1432 	exception_exit(prev_state);
1433 }
1434 
1435 /*
1436  * Called with interrupts disabled.
1437  */
1438 asmlinkage void do_watch(struct pt_regs *regs)
1439 {
1440 	enum ctx_state prev_state;
1441 	u32 cause;
1442 
1443 	prev_state = exception_enter();
1444 	/*
1445 	 * Clear the WP bit (bit 22) of the cause register so we don't
1446 	 * loop forever.
1447 	 */
1448 	cause = read_c0_cause();
1449 	cause &= ~(1 << 22);
1450 	write_c0_cause(cause);
1451 
1452 	/*
1453 	 * If the current thread has the watch registers loaded, save
1454 	 * their values and send SIGTRAP.  Otherwise another thread
1455 	 * left the registers set; clear them and continue.
1456 	 */
1457 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1458 		mips_read_watch_registers();
1459 		local_irq_enable();
1460 		force_sig(SIGTRAP, current);
1461 	} else {
1462 		mips_clear_watch_registers();
1463 		local_irq_enable();
1464 	}
1465 	exception_exit(prev_state);
1466 }
1467 
1468 asmlinkage void do_mcheck(struct pt_regs *regs)
1469 {
1470 	const int field = 2 * sizeof(unsigned long);
1471 	int multi_match = regs->cp0_status & ST0_TS;
1472 	enum ctx_state prev_state;
1473 
1474 	prev_state = exception_enter();
1475 	show_regs(regs);
1476 
1477 	if (multi_match) {
1478 		pr_err("Index	: %0x\n", read_c0_index());
1479 		pr_err("Pagemask: %0x\n", read_c0_pagemask());
1480 		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
1481 		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
1482 		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
1483 		pr_err("Wired   : %0x\n", read_c0_wired());
1484 		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
1485 		if (cpu_has_htw) {
1486 			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
1487 			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
1488 			pr_err("PWCtl   : %0x\n", read_c0_pwctl());
1489 		}
1490 		pr_err("\n");
1491 		dump_tlb_all();
1492 	}
1493 
1494 	show_code((unsigned int __user *) regs->cp0_epc);
1495 
1496 	/*
1497 	 * Some chips may have other causes of machine check (e.g. SB1
1498 	 * graduation timer)
1499 	 */
1500 	panic("Caught Machine Check exception - %scaused by multiple "
1501 	      "matching entries in the TLB.",
1502 	      (multi_match) ? "" : "not ");
1503 }
1504 
1505 asmlinkage void do_mt(struct pt_regs *regs)
1506 {
1507 	int subcode;
1508 
1509 	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1510 			>> VPECONTROL_EXCPT_SHIFT;
1511 	switch (subcode) {
1512 	case 0:
1513 		printk(KERN_DEBUG "Thread Underflow\n");
1514 		break;
1515 	case 1:
1516 		printk(KERN_DEBUG "Thread Overflow\n");
1517 		break;
1518 	case 2:
1519 		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1520 		break;
1521 	case 3:
1522 		printk(KERN_DEBUG "Gating Storage Exception\n");
1523 		break;
1524 	case 4:
1525 		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1526 		break;
1527 	case 5:
1528 		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1529 		break;
1530 	default:
1531 		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1532 			subcode);
1533 		break;
1534 	}
1535 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1536 
1537 	force_sig(SIGILL, current);
1538 }
1539 
1540 
1541 asmlinkage void do_dsp(struct pt_regs *regs)
1542 {
1543 	if (cpu_has_dsp)
1544 		panic("Unexpected DSP exception");
1545 
1546 	force_sig(SIGILL, current);
1547 }
1548 
1549 asmlinkage void do_reserved(struct pt_regs *regs)
1550 {
1551 	/*
1552 	 * Game over - there is no way to handle this if it ever occurs.
1553 	 * Most probably caused by a new, unknown CPU type or by another
1554 	 * deadly hardware/software error.
1555 	 */
1556 	show_regs(regs);
1557 	panic("Caught reserved exception %ld - should not happen.",
1558 	      (regs->cp0_cause & 0x7f) >> 2);
1559 }
1560 
1561 static int __initdata l1parity = 1;
1562 static int __init nol1parity(char *s)
1563 {
1564 	l1parity = 0;
1565 	return 1;
1566 }
1567 __setup("nol1par", nol1parity);
1568 static int __initdata l2parity = 1;
1569 static int __init nol2parity(char *s)
1570 {
1571 	l2parity = 0;
1572 	return 1;
1573 }
1574 __setup("nol2par", nol2parity);
1575 
1576 /*
1577  * Some MIPS CPUs can enable/disable cache parity detection, but they
1578  * do it in different ways.
1579  */
1580 static inline void parity_protection_init(void)
1581 {
1582 	switch (current_cpu_type()) {
1583 	case CPU_24K:
1584 	case CPU_34K:
1585 	case CPU_74K:
1586 	case CPU_1004K:
1587 	case CPU_1074K:
1588 	case CPU_INTERAPTIV:
1589 	case CPU_PROAPTIV:
1590 	case CPU_P5600:
1591 	case CPU_QEMU_GENERIC:
1592 		{
1593 #define ERRCTL_PE	0x80000000
1594 #define ERRCTL_L2P	0x00800000
1595 			unsigned long errctl;
1596 			unsigned int l1parity_present, l2parity_present;
1597 
1598 			errctl = read_c0_ecc();
1599 			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1600 
1601 			/* probe L1 parity support */
1602 			write_c0_ecc(errctl | ERRCTL_PE);
1603 			back_to_back_c0_hazard();
1604 			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1605 
1606 			/* probe L2 parity support */
1607 			write_c0_ecc(errctl|ERRCTL_L2P);
1608 			back_to_back_c0_hazard();
1609 			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1610 
1611 			if (l1parity_present && l2parity_present) {
1612 				if (l1parity)
1613 					errctl |= ERRCTL_PE;
1614 				if (l1parity ^ l2parity)
1615 					errctl |= ERRCTL_L2P;
1616 			} else if (l1parity_present) {
1617 				if (l1parity)
1618 					errctl |= ERRCTL_PE;
1619 			} else if (l2parity_present) {
1620 				if (l2parity)
1621 					errctl |= ERRCTL_L2P;
1622 			} else {
1623 				/* No parity available */
1624 			}
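			/*
			 * Truth table for the dual-parity case above
			 * (annotation; assumes L2P selects L2 parity
			 * relative to PE on these cores, which is why
			 * an XOR also appears in the readback report
			 * below):
			 *
			 *   l1parity l2parity ->  PE  L2P
			 *       1        1         1    0
			 *       1        0         1    1
			 *       0        1         0    1
			 *       0        0         0    0
			 */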
1625 
1626 			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1627 
1628 			write_c0_ecc(errctl);
1629 			back_to_back_c0_hazard();
1630 			errctl = read_c0_ecc();
1631 			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1632 
1633 			if (l1parity_present)
1634 				printk(KERN_INFO "Cache parity protection %sabled\n",
1635 				       (errctl & ERRCTL_PE) ? "en" : "dis");
1636 
1637 			if (l2parity_present) {
1638 				if (l1parity_present && l1parity)
1639 					errctl ^= ERRCTL_L2P;
1640 				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1641 				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1642 			}
1643 		}
1644 		break;
1645 
1646 	case CPU_5KC:
1647 	case CPU_5KE:
1648 	case CPU_LOONGSON1:
1649 		write_c0_ecc(0x80000000);
1650 		back_to_back_c0_hazard();
1651 		/* Set the PE bit (bit 31) in the c0_errctl register. */
1652 		printk(KERN_INFO "Cache parity protection %sabled\n",
1653 		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1654 		break;
1655 	case CPU_20KC:
1656 	case CPU_25KF:
1657 		/* Clear the DE bit (bit 16) in the c0_status register. */
1658 		printk(KERN_INFO "Enable cache parity protection for "
1659 		       "MIPS 20KC/25KF CPUs.\n");
1660 		clear_c0_status(ST0_DE);
1661 		break;
1662 	default:
1663 		break;
1664 	}
1665 }
1666 
1667 asmlinkage void cache_parity_error(void)
1668 {
1669 	const int field = 2 * sizeof(unsigned long);
1670 	unsigned int reg_val;
1671 
1672 	/* For the moment, report the problem and hang. */
1673 	printk("Cache error exception:\n");
1674 	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1675 	reg_val = read_c0_cacheerr();
1676 	printk("c0_cacheerr == %08x\n", reg_val);
1677 
1678 	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1679 	       reg_val & (1<<30) ? "secondary" : "primary",
1680 	       reg_val & (1<<31) ? "data" : "insn");
1681 	if ((cpu_has_mips_r2_r6) &&
1682 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1683 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1684 			reg_val & (1<<29) ? "ED " : "",
1685 			reg_val & (1<<28) ? "ET " : "",
1686 			reg_val & (1<<27) ? "ES " : "",
1687 			reg_val & (1<<26) ? "EE " : "",
1688 			reg_val & (1<<25) ? "EB " : "",
1689 			reg_val & (1<<24) ? "EI " : "",
1690 			reg_val & (1<<23) ? "E1 " : "",
1691 			reg_val & (1<<22) ? "E0 " : "");
1692 	} else {
1693 		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1694 			reg_val & (1<<29) ? "ED " : "",
1695 			reg_val & (1<<28) ? "ET " : "",
1696 			reg_val & (1<<26) ? "EE " : "",
1697 			reg_val & (1<<25) ? "EB " : "",
1698 			reg_val & (1<<24) ? "EI " : "",
1699 			reg_val & (1<<23) ? "E1 " : "",
1700 			reg_val & (1<<22) ? "E0 " : "");
1701 	}
1702 	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1703 
1704 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1705 	if (reg_val & (1<<22))
1706 		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1707 
1708 	if (reg_val & (1<<23))
1709 		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1710 #endif
1711 
1712 	panic("Can't handle the cache error!");
1713 }
1714 
1715 asmlinkage void do_ftlb(void)
1716 {
1717 	const int field = 2 * sizeof(unsigned long);
1718 	unsigned int reg_val;
1719 
1720 	/* For the moment, report the problem and hang. */
1721 	if ((cpu_has_mips_r2_r6) &&
1722 	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1723 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1724 		       read_c0_ecc());
1725 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1726 		reg_val = read_c0_cacheerr();
1727 		pr_err("c0_cacheerr == %08x\n", reg_val);
1728 
1729 		if ((reg_val & 0xc0000000) == 0xc0000000) {
1730 			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1731 		} else {
1732 			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1733 			       reg_val & (1<<30) ? "secondary" : "primary",
1734 			       reg_val & (1<<31) ? "data" : "insn");
1735 		}
1736 	} else {
1737 		pr_err("FTLB error exception\n");
1738 	}
1739 	/* Just print the cacheerr bits for now */
1740 	cache_parity_error();
1741 }
1742 
1743 /*
1744  * SDBBP EJTAG debug exception handler.
1745  * We skip the instruction and return to the next instruction.
1746  */
1747 void ejtag_exception_handler(struct pt_regs *regs)
1748 {
1749 	const int field = 2 * sizeof(unsigned long);
1750 	unsigned long depc, old_epc, old_ra;
1751 	unsigned int debug;
1752 
1753 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1754 	depc = read_c0_depc();
1755 	debug = read_c0_debug();
1756 	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1757 	if (debug & 0x80000000) {
1758 		/*
1759 		 * In branch delay slot.
1760 		 * We cheat a little bit here and use EPC to calculate the
1761 		 * debug return address (DEPC). EPC is restored after the
1762 		 * calculation.
1763 		 */
1764 		old_epc = regs->cp0_epc;
1765 		old_ra = regs->regs[31];
1766 		regs->cp0_epc = depc;
1767 		compute_return_epc(regs);
1768 		depc = regs->cp0_epc;
1769 		regs->cp0_epc = old_epc;
1770 		regs->regs[31] = old_ra;
1771 	} else
1772 		depc += 4;
1773 	write_c0_depc(depc);
1774 
1775 #if 0
1776 	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1777 	write_c0_debug(debug | 0x100);
1778 #endif
1779 }
1780 
1781 /*
1782  * NMI exception handler.
1783  * No lock; only written during early bootup by CPU 0.
1784  */
1785 static RAW_NOTIFIER_HEAD(nmi_chain);
1786 
1787 int register_nmi_notifier(struct notifier_block *nb)
1788 {
1789 	return raw_notifier_chain_register(&nmi_chain, nb);
1790 }
1791 
1792 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1793 {
1794 	char str[100];
1795 
1796 	raw_notifier_call_chain(&nmi_chain, 0, regs);
1797 	bust_spinlocks(1);
1798 	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1799 		 smp_processor_id(), regs->cp0_epc);
1800 	regs->cp0_epc = read_c0_errorepc();
1801 	die(str, regs);
1802 }
1803 
1804 #define VECTORSPACING 0x100	/* for EI/VI mode */
1805 
1806 unsigned long ebase;
1807 unsigned long exception_handlers[32];
1808 unsigned long vi_handlers[64];
1809 
1810 void __init *set_except_vector(int n, void *addr)
1811 {
1812 	unsigned long handler = (unsigned long) addr;
1813 	unsigned long old_handler;
1814 
1815 #ifdef CONFIG_CPU_MICROMIPS
1816 	/*
1817 	 * Only the TLB handlers are cache aligned with an even
1818 	 * address. All other handlers are on an odd address and
1819 	 * require no modification. Otherwise, MIPS32 mode will
1820 	 * be entered when handling any TLB exceptions. That
1821 	 * would be bad...since we must stay in microMIPS mode.
1822 	 */
1823 	if (!(handler & 0x1))
1824 		handler |= 1;
1825 #endif
1826 	old_handler = xchg(&exception_handlers[n], handler);
1827 
1828 	if (n == 0 && cpu_has_divec) {
1829 #ifdef CONFIG_CPU_MICROMIPS
1830 		unsigned long jump_mask = ~((1 << 27) - 1);
1831 #else
1832 		unsigned long jump_mask = ~((1 << 28) - 1);
1833 #endif
1834 		u32 *buf = (u32 *)(ebase + 0x200);
1835 		unsigned int k0 = 26;
1836 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1837 			uasm_i_j(&buf, handler & ~jump_mask);
1838 			uasm_i_nop(&buf);
1839 		} else {
1840 			UASM_i_LA(&buf, k0, handler);
1841 			uasm_i_jr(&buf, k0);
1842 			uasm_i_nop(&buf);
1843 		}
1844 		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1845 	}
1846 	return (void *)old_handler;
1847 }
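/*
 * Annotation (illustrative): callers install handlers keyed by the
 * Cause.ExcCode value, e.g. trap_init() later in this file does
 * set_except_vector(13, handle_tr) so that a Trap exception
 * (ExcCode 13) enters handle_tr.
 */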
1848 
1849 static void do_default_vi(void)
1850 {
1851 	show_regs(get_irq_regs());
1852 	panic("Caught unexpected vectored interrupt.");
1853 }
1854 
1855 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1856 {
1857 	unsigned long handler;
1858 	unsigned long old_handler = vi_handlers[n];
1859 	int srssets = current_cpu_data.srsets;
1860 	u16 *h;
1861 	unsigned char *b;
1862 
1863 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1864 
1865 	if (addr == NULL) {
1866 		handler = (unsigned long) do_default_vi;
1867 		srs = 0;
1868 	} else
1869 		handler = (unsigned long) addr;
1870 	vi_handlers[n] = handler;
1871 
1872 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1873 
1874 	if (srs >= srssets)
1875 		panic("Shadow register set %d not supported", srs);
1876 
1877 	if (cpu_has_veic) {
1878 		if (board_bind_eic_interrupt)
1879 			board_bind_eic_interrupt(n, srs);
1880 	} else if (cpu_has_vint) {
1881 		/* SRSMap is only defined if shadow sets are implemented */
1882 		if (srssets > 1)
1883 			change_c0_srsmap(0xf << n*4, srs << n*4);
1884 	}
1885 
1886 	if (srs == 0) {
1887 		/*
1888 		 * If no shadow set is selected then use the default handler
1889 		 * that does normal register saving and standard interrupt exit.
1890 		 */
1891 		extern char except_vec_vi, except_vec_vi_lui;
1892 		extern char except_vec_vi_ori, except_vec_vi_end;
1893 		extern char rollback_except_vec_vi;
1894 		char *vec_start = using_rollback_handler() ?
1895 			&rollback_except_vec_vi : &except_vec_vi;
1896 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1897 		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1898 		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1899 #else
1900 		const int lui_offset = &except_vec_vi_lui - vec_start;
1901 		const int ori_offset = &except_vec_vi_ori - vec_start;
1902 #endif
1903 		const int handler_len = &except_vec_vi_end - vec_start;
1904 
1905 		if (handler_len > VECTORSPACING) {
1906 			/*
1907 			 * Sigh... panicking won't help as the console
1908 			 * is probably not configured :(
1909 			 */
1910 			panic("VECTORSPACING too small");
1911 		}
1912 
1913 		set_handler(((unsigned long)b - ebase), vec_start,
1914 #ifdef CONFIG_CPU_MICROMIPS
1915 				(handler_len - 1));
1916 #else
1917 				handler_len);
1918 #endif
1919 		h = (u16 *)(b + lui_offset);
1920 		*h = (handler >> 16) & 0xffff;
1921 		h = (u16 *)(b + ori_offset);
1922 		*h = (handler & 0xffff);
1923 		local_flush_icache_range((unsigned long)b,
1924 					 (unsigned long)(b+handler_len));
1925 	}
1926 	else {
1927 		/*
1928 		 * In other cases jump directly to the interrupt handler. It
1929 		 * is the handler's responsibility to save registers if required
1930 		 * (e.g. hi/lo) and return from the exception using "eret".
1931 		 */
1932 		u32 insn;
1933 
1934 		h = (u16 *)b;
1935 		/* j handler */
1936 #ifdef CONFIG_CPU_MICROMIPS
1937 		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1938 #else
1939 		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1940 #endif
1941 		h[0] = (insn >> 16) & 0xffff;
1942 		h[1] = insn & 0xffff;
1943 		h[2] = 0;
1944 		h[3] = 0;
1945 		local_flush_icache_range((unsigned long)b,
1946 					 (unsigned long)(b+8));
1947 	}
1948 
1949 	return (void *)old_handler;
1950 }
1951 
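/*
 * A minimal sketch of the patching done above for the srs == 0 case:
 * the template stub loads the handler address with a lui/ori pair, so
 * the 32-bit address is split into two plain 16-bit immediates.
 * Because ori zero-extends (unlike addiu), no %hi/%lo carry
 * correction is needed.
 */
#if 0	/* illustrative sketch only */
static void split_handler_address(u32 handler, u16 *hi, u16 *lo)
{
	*hi = (handler >> 16) & 0xffff;	/* immediate for lui */
	*lo = handler & 0xffff;		/* immediate for ori */
}
#endif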
1952 void *set_vi_handler(int n, vi_handler_t addr)
1953 {
1954 	return set_vi_srs_handler(n, addr, 0);
1955 }
1956 
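/*
 * Hypothetical usage from a platform interrupt setup path: install a
 * dispatch routine on vector 3 with the default shadow register set,
 * keeping the previous handler so it can be restored later, e.g.
 *
 *	old = set_vi_handler(3, my_dispatch);
 *
 * where my_dispatch is a platform-supplied vi_handler_t.
 */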
1957 extern void tlb_init(void);
1958 
1959 /*
1960  * Timer interrupt
1961  */
1962 int cp0_compare_irq;
1963 EXPORT_SYMBOL_GPL(cp0_compare_irq);
1964 int cp0_compare_irq_shift;
1965 
1966 /*
1967  * Performance counter IRQ or -1 if shared with timer
1968  */
1969 int cp0_perfcount_irq;
1970 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1971 
1972 static int noulri;
1973 
1974 static int __init ulri_disable(char *s)
1975 {
1976 	pr_info("Disabling ulri\n");
1977 	noulri = 1;
1978 
1979 	return 1;
1980 }
1981 __setup("noulri", ulri_disable);
1982 
1983 /* configure STATUS register */
1984 static void configure_status(void)
1985 {
1986 	/*
1987 	 * Disable coprocessors and select 32-bit or 64-bit addressing
1988 	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
1989 	 * flag that some firmware may have left set and the TS bit (for
1990 	 * IP27).  Set XX for ISA IV code to work.
1991 	 */
1992 	unsigned int status_set = ST0_CU0;
1993 #ifdef CONFIG_64BIT
1994 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1995 #endif
1996 	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1997 		status_set |= ST0_XX;
1998 	if (cpu_has_dsp)
1999 		status_set |= ST0_MX;
2000 
2001 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2002 			 status_set);
2003 }
2004 
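/*
 * change_c0_status(mask, bits) is a read-modify-write of the Status
 * register: the bits in "mask" are cleared first, then the bits in
 * "bits" (restricted to the mask) are set.  Roughly:
 */
#if 0	/* illustrative sketch of the generated accessor */
static void sketch_change_c0_status(unsigned int mask, unsigned int bits)
{
	unsigned int s = read_c0_status();

	s &= ~mask;		/* clear the whole field     */
	s |= (bits & mask);	/* then set the desired bits */
	write_c0_status(s);
}
#endif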
2005 /* configure HWRENA register */
2006 static void configure_hwrena(void)
2007 {
2008 	unsigned int hwrena = cpu_hwrena_impl_bits;
2009 
2010 	if (cpu_has_mips_r2_r6)
2011 		hwrena |= 0x0000000f;
2012 
2013 	if (!noulri && cpu_has_userlocal)
2014 		hwrena |= (1 << 29);
2015 
2016 	if (hwrena)
2017 		write_c0_hwrena(hwrena);
2018 }
2019 
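/*
 * HWREna bits 0-3 expose CPUNum, SYNCI_Step, CC and CCRes to user mode
 * through rdhwr; bit 29 exposes UserLocal (hardware register $29),
 * which C libraries typically read for the TLS pointer:
 *
 *	rdhwr	$3, $29		# v1 <- UserLocal
 *
 * If ULR access is disabled (the "noulri" option above), executing
 * rdhwr in user mode traps to the RI handler, which emulates it (see
 * handle_ri_rdhwr).
 */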
2020 static void configure_exception_vector(void)
2021 {
2022 	if (cpu_has_veic || cpu_has_vint) {
2023 		unsigned long sr = set_c0_status(ST0_BEV);
2024 		write_c0_ebase(ebase);
2025 		write_c0_status(sr);
2026 		/* Setting vector spacing enables EI/VI mode  */
2027 		change_c0_intctl(0x3e0, VECTORSPACING);
2028 	}
2029 	if (cpu_has_divec) {
2030 		if (cpu_has_mipsmt) {
2031 			unsigned int vpflags = dvpe();
2032 			set_c0_cause(CAUSEF_IV);
2033 			evpe(vpflags);
2034 		} else
2035 			set_c0_cause(CAUSEF_IV);
2036 	}
2037 }
2038 
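/*
 * Setting Cause.IV above moves interrupt exceptions from the general
 * vector (ebase + 0x180) to the dedicated interrupt vector at
 * ebase + 0x200, which the divec patching in set_except_vector() and
 * the vectored-interrupt stubs rely on.  On MT cores the other VPEs
 * are paused around the write (dvpe/evpe) so the update is not
 * observed half-done.
 */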
2039 void per_cpu_trap_init(bool is_boot_cpu)
2040 {
2041 	unsigned int cpu = smp_processor_id();
2042 
2043 	configure_status();
2044 	configure_hwrena();
2045 
2046 	configure_exception_vector();
2047 
2048 	/*
2049 	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2050 	 *
2051 	 *  o read IntCtl.IPTI to determine the timer interrupt
2052 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2053 	 */
2054 	if (cpu_has_mips_r2_r6) {
2055 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2056 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2057 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2058 		if (cp0_perfcount_irq == cp0_compare_irq)
2059 			cp0_perfcount_irq = -1;
2060 	} else {
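		/*
		 * Note: using CP0_LEGACY_PERFCNT_IRQ for the compare shift
		 * below looks odd, but both legacy constants are 7, so the
		 * assigned value is the same either way.
		 */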
2061 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2062 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2063 		cp0_perfcount_irq = -1;
2064 	}
2065 
2066 	if (!cpu_data[cpu].asid_cache)
2067 		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
2068 
2069 	atomic_inc(&init_mm.mm_count);
2070 	current->active_mm = &init_mm;
2071 	BUG_ON(current->mm);
2072 	enter_lazy_tlb(&init_mm, current);
2073 
2074 	/* Boot CPU's cache setup in setup_arch(). */
2075 	if (!is_boot_cpu)
2076 		cpu_cache_init();
2077 	tlb_init();
2078 	TLBMISS_HANDLER_SETUP();
2079 }
2080 
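/*
 * A hypothetical consumer of the values probed above: the cevt-r4k
 * clockevent driver derives the timer's Linux IRQ from the probed CP0
 * interrupt pin, and the perf code falls back to sharing that line
 * when cp0_perfcount_irq is -1:
 */
#if 0	/* illustrative sketch only */
static void sketch_irq_numbers(void)
{
	int timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	int perf_irq = cp0_perfcount_irq >= 0 ?
		       MIPS_CPU_IRQ_BASE + cp0_perfcount_irq : timer_irq;
}
#endif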
2081 /* Install CPU exception handler */
2082 void set_handler(unsigned long offset, void *addr, unsigned long size)
2083 {
2084 #ifdef CONFIG_CPU_MICROMIPS
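	/*
	 * microMIPS function pointers have the ISA mode bit (bit 0) set;
	 * subtracting 1 strips it so the raw instruction bytes are copied.
	 */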
2085 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2086 #else
2087 	memcpy((void *)(ebase + offset), addr, size);
2088 #endif
2089 	local_flush_icache_range(ebase + offset, ebase + offset + size);
2090 }
2091 
2092 static char panic_null_cerr[] =
2093 	"Trying to set NULL cache error exception handler";
2094 
2095 /*
2096  * Install uncached CPU exception handler.
2097  * This is suitable only for the cache error exception which is the only
2098  * exception handler that is being run uncached.
2099  */
2100 void set_uncached_handler(unsigned long offset, void *addr,
2101 	unsigned long size)
2102 {
2103 	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2104 
2105 	if (!addr)
2106 		panic(panic_null_cerr);
2107 
2108 	memcpy((void *)(uncached_ebase + offset), addr, size);
2109 }
2110 
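/*
 * CKSEG1ADDR() rebases the (cached) KSEG0 exception base into KSEG1,
 * the uncached alias of the same physical memory.  On a 32-bit kernel
 * this is effectively (hypothetical macro name, for illustration):
 */
#if 0	/* illustrative sketch only */
#define SKETCH_CKSEG1ADDR(a)	((((unsigned long)(a)) & 0x1fffffff) | 0xa0000000)
#endif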
2111 static int __initdata rdhwr_noopt;
2112 static int __init set_rdhwr_noopt(char *str)
2113 {
2114 	rdhwr_noopt = 1;
2115 	return 1;
2116 }
2117 
2118 __setup("rdhwr_noopt", set_rdhwr_noopt);
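/*
 * Booting with "rdhwr_noopt" on the kernel command line installs the
 * plain RI handler on vector 10 below instead of the variant that
 * fast-paths rdhwr emulation (handle_ri_rdhwr / handle_ri_rdhwr_vivt).
 */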
2119 
2120 void __init trap_init(void)
2121 {
2122 	extern char except_vec3_generic;
2123 	extern char except_vec4;
2124 	extern char except_vec3_r4000;
2125 	unsigned long i;
2126 
2127 	check_wait();
2128 
2129 #if defined(CONFIG_KGDB)
2130 	if (kgdb_early_setup)
2131 		return; /* Already done */
2132 #endif
2133 
2134 	if (cpu_has_veic || cpu_has_vint) {
2135 		unsigned long size = 0x200 + VECTORSPACING*64;
2136 		ebase = (unsigned long)
2137 			__alloc_bootmem(size, 1 << fls(size), 0);
2138 	} else {
2139 #ifdef CONFIG_KVM_GUEST
2140 #define KVM_GUEST_KSEG0     0x40000000
2141 		ebase = KVM_GUEST_KSEG0;
2142 #else
2143 		ebase = CKSEG0;
2144 #endif
2145 		if (cpu_has_mips_r2_r6)
2146 			ebase += (read_c0_ebase() & 0x3ffff000);
2147 	}
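	/*
	 * On R2+ cores the EBase register relocates the exception vectors;
	 * bits 29:12 hold the exception base, which is merged into the
	 * KSEG0 (or KVM guest) window chosen above.
	 */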
2148 
2149 	if (cpu_has_mmips) {
2150 		unsigned int config3 = read_c0_config3();
2151 
2152 		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2153 			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2154 		else
2155 			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2156 	}
2157 
2158 	if (board_ebase_setup)
2159 		board_ebase_setup();
2160 	per_cpu_trap_init(true);
2161 
2162 	/*
2163 	 * Copy the generic exception handlers to their final destination.
2164 	 * This will be overridden later as suitable for a particular
2165 	 * configuration.
2166 	 */
2167 	set_handler(0x180, &except_vec3_generic, 0x80);
2168 
2169 	/*
2170 	 * Set up default vectors
2171 	 */
2172 	for (i = 0; i <= 31; i++)
2173 		set_except_vector(i, handle_reserved);
2174 
2175 	/*
2176 	 * Copy the EJTAG debug exception vector handler code to its final
2177 	 * destination.
2178 	 */
2179 	if (cpu_has_ejtag && board_ejtag_handler_setup)
2180 		board_ejtag_handler_setup();
2181 
2182 	/*
2183 	 * Only some CPUs have the watch exceptions.
2184 	 */
2185 	if (cpu_has_watch)
2186 		set_except_vector(23, handle_watch);
2187 
2188 	/*
2189 	 * Initialise interrupt handlers
2190 	 */
2191 	if (cpu_has_veic || cpu_has_vint) {
2192 		int nvec = cpu_has_veic ? 64 : 8;
2193 		for (i = 0; i < nvec; i++)
2194 			set_vi_handler(i, NULL);
2195 	}
2196 	else if (cpu_has_divec)
2197 		set_handler(0x200, &except_vec4, 0x8);
2198 
2199 	/*
2200 	 * Some CPUs can enable/disable cache parity detection, but they
2201 	 * do it in different ways.
2202 	 */
2203 	parity_protection_init();
2204 
2205 	/*
2206 	 * The Data Bus Errors / Instruction Bus Errors are signaled
2207 	 * by external hardware.  Therefore these two exceptions
2208 	 * may have board specific handlers.
2209 	 */
2210 	if (board_be_init)
2211 		board_be_init();
2212 
2213 	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
2214 						      : handle_int);
2215 	set_except_vector(1, handle_tlbm);
2216 	set_except_vector(2, handle_tlbl);
2217 	set_except_vector(3, handle_tlbs);
2218 
2219 	set_except_vector(4, handle_adel);
2220 	set_except_vector(5, handle_ades);
2221 
2222 	set_except_vector(6, handle_ibe);
2223 	set_except_vector(7, handle_dbe);
2224 
2225 	set_except_vector(8, handle_sys);
2226 	set_except_vector(9, handle_bp);
2227 	set_except_vector(10, rdhwr_noopt ? handle_ri :
2228 			  (cpu_has_vtag_icache ?
2229 			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2230 	set_except_vector(11, handle_cpu);
2231 	set_except_vector(12, handle_ov);
2232 	set_except_vector(13, handle_tr);
2233 	set_except_vector(14, handle_msa_fpe);
2234 
2235 	if (current_cpu_type() == CPU_R6000 ||
2236 	    current_cpu_type() == CPU_R6000A) {
2237 		/*
2238 		 * The R6000 is the only R-series CPU that features a machine
2239 		 * check exception (similar to the R4000 cache error) and
2240 		 * unaligned ldc1/sdc1 exception.  The handlers have not been
2241 		 * written yet.	 Well, anyway there is no R6000 machine on the
2242 		 * written yet.  Well, anyway there is no R6000 machine on the
2243 		 * (Duh, crap, there is someone with a triple R6k machine)
2244 		 */
2245 		//set_except_vector(14, handle_mc);
2246 		//set_except_vector(15, handle_ndc);
2247 	}
2248 
2249 
2250 	if (board_nmi_handler_setup)
2251 		board_nmi_handler_setup();
2252 
2253 	if (cpu_has_fpu && !cpu_has_nofpuex)
2254 		set_except_vector(15, handle_fpe);
2255 
2256 	set_except_vector(16, handle_ftlb);
2257 
2258 	if (cpu_has_rixiex) {
2259 		set_except_vector(19, tlb_do_page_fault_0);
2260 		set_except_vector(20, tlb_do_page_fault_0);
2261 	}
2262 
2263 	set_except_vector(21, handle_msa);
2264 	set_except_vector(22, handle_mdmx);
2265 
2266 	if (cpu_has_mcheck)
2267 		set_except_vector(24, handle_mcheck);
2268 
2269 	if (cpu_has_mipsmt)
2270 		set_except_vector(25, handle_mt);
2271 
2272 	set_except_vector(26, handle_dsp);
2273 
2274 	if (board_cache_error_setup)
2275 		board_cache_error_setup();
2276 
2277 	if (cpu_has_vce)
2278 		/* Special exception: R4[04]00 uses also the divec space. */
2279 		set_handler(0x180, &except_vec3_r4000, 0x100);
2280 	else if (cpu_has_4kex)
2281 		set_handler(0x180, &except_vec3_generic, 0x80);
2282 	else
2283 		set_handler(0x080, &except_vec3_generic, 0x80);
2284 
2285 	local_flush_icache_range(ebase, ebase + 0x400);
2286 
2287 	sort_extable(__start___dbe_table, __stop___dbe_table);
2288 
2289 	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2290 }
2291 
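/*
 * Deep low-power states may clobber CP0 state, so on CPU_PM_EXIT (or a
 * failed entry) the Status, HWREna and exception vector configuration
 * programmed in per_cpu_trap_init() is re-applied below.
 */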
2292 static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2293 			    void *v)
2294 {
2295 	switch (cmd) {
2296 	case CPU_PM_ENTER_FAILED:
2297 	case CPU_PM_EXIT:
2298 		configure_status();
2299 		configure_hwrena();
2300 		configure_exception_vector();
2301 
2302 		/* Restore register with CPU number for TLB handlers */
2303 		TLBMISS_HANDLER_RESTORE();
2304 
2305 		break;
2306 	}
2307 
2308 	return NOTIFY_OK;
2309 }
2310 
2311 static struct notifier_block trap_pm_notifier_block = {
2312 	.notifier_call = trap_pm_notifier,
2313 };
2314 
2315 static int __init trap_pm_init(void)
2316 {
2317 	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2318 }
2319 arch_initcall(trap_pm_init);
2320