// SPDX-License-Identifier: GPL-2.0
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>

#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>

#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_UBC		12
#  define TRAP_FPU_ERROR	13
#  define TRAP_DIVZERO_ERROR	17
#  define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && (dst[0] & 0x80)) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && (dst[1] & 0x80)) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && (dst[3] & 0x80)) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && (dst[2] & 0x80)) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}
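
/*
 * Worked example for sign_extend(), little-endian case: after a 2-byte
 * load of 0x8001, dst[] holds { 0x01, 0x80, 0x00, 0x00 }. Bit 7 of
 * dst[1] is set, so bytes 2 and 3 are filled with 0xff and the register
 * reads back as 0xffff8001, i.e. (int)(short)0x8001.
 */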

static struct mem_access user_mem_access = {
	.from	= copy_from_user,
	.to	= copy_to_user,
};

static unsigned long copy_from_kernel_wrapper(void *dst, const void __user *src,
					      unsigned long cnt)
{
	return copy_from_kernel_nofault(dst, (const void __force *)src, cnt);
}

static unsigned long copy_to_kernel_wrapper(void __user *dst, const void *src,
					    unsigned long cnt)
{
	return copy_to_kernel_nofault((void __force *)dst, src, cnt);
}

static struct mem_access kernel_mem_access = {
	.from	= copy_from_kernel_wrapper,
	.to	= copy_to_kernel_wrapper,
};
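
/*
 * The two ops tables above let handle_unaligned_ins() run the same
 * emulation path against either user or kernel addresses;
 * do_address_error() below picks user_mem_access or kernel_mem_access
 * based on the mode the fault was taken in.
 */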

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;
	unsigned char __user *srcu, *dstu;

	index = (instruction >> 8) & 15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction >> 4) & 15;	/* 0x00F0 */
	rm = &regs->regs[index];

	count = 1 << (instruction & 3);
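
	/*
	 * Example (illustrative): 0x6532 is "mov.l @r3,r5", so rn points
	 * at regs->regs[5], rm points at regs->regs[3], and
	 * count = 1 << (0x6532 & 3) = 4 bytes.
	 */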

	switch (count) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	ret = -EFAULT;
	switch (instruction >> 12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			srcu = (unsigned char __user *)*rm;
			srcu += regs->regs[0];
			dst = (unsigned char *)rn;
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 4 - count;
#endif
			if (ma->from(dst, srcu, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4 - count;
#endif
			dstu = (unsigned char __user *)*rn;
			dstu += regs->regs[0];

			if (ma->to(dstu, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char *)rm;
		dstu = (unsigned char __user *)*rn;
		dstu += (instruction & 0x000F) << 2;

		if (ma->to(dstu, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char *)rm;
		dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4 - count;
#endif
		if (ma->to(dstu, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		srcu = (unsigned char __user *)*rm;
		srcu += (instruction & 0x000F) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
		srcu = (unsigned char __user *)*rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4 - count;
#endif
		if (ma->from(dst, srcu, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

	case 8:
		switch ((instruction & 0xFF00) >> 8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *)&regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
			dstu += (instruction & 0x000F) << 1;

			if (ma->to(dstu, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			srcu = (unsigned char __user *)*rm;
			srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *)&regs->regs[0];
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, srcu, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;

	case 9: /* mov.w @(disp,PC),Rn */
		srcu = (unsigned char __user *)regs->pc;
		srcu += 4;
		srcu += (instruction & 0x00FF) << 1;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 2;
#endif

		if (ma->from(dst, srcu, 2))
			goto fetch_fault;
		sign_extend(2, dst);
		ret = 0;
		break;

	case 0xd: /* mov.l @(disp,PC),Rn */
		srcu = (unsigned char __user *)(regs->pc & ~0x3);
		srcu += 4;
		srcu += (instruction & 0x00FF) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;
	}
	return ret;

 fetch_fault:
	/*
	 * Argh. The address is not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped.
	 */
	die_if_no_fixup("Fault in unaligned fixup", regs, 0);
	return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   insn_size_t old_instruction,
				   struct mem_access *ma)
{
	insn_size_t instruction;
	void __user *addr = (void __user *)(regs->pc +
		instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken, PC points to the branch
 *   - if the branch would not be taken, PC points to the delay slot
 *  SH4:
 *   - PC always points to the delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr)) * 2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr) << 4)) >> 3) + 4)
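
/*
 * Example (illustrative): for "bra" 0xA004 (d = 4), (0xA004 << 4)
 * truncates to the signed short 0x0040, and 0x0040 >> 3 = 8 (= d * 2);
 * the trailing +4 accounts for the branch being taken relative to
 * PC + 4, giving PC += 12. Likewise "bt" 0x8905 (d = 5) yields
 * (signed char)0x05 * 2 + 4 = 14.
 */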

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int expected,
			    unsigned long address)
{
	u_int rm;
	int ret, index;

	/*
	 * XXX: We can't handle mixed 16/32-bit instructions yet
	 */
	if (instruction_size(instruction) != 2)
		return -EINVAL;

	index = (instruction >> 8) & 15;	/* 0x0F00 */
	rm = regs->regs[index];

	/*
	 * Log the unexpected fixups, and then pass them on to perf.
	 *
	 * We intentionally don't report the expected cases to perf as
	 * otherwise the trapped I/O case will skew the results too much
	 * to be useful.
	 */
	if (!expected) {
		unaligned_fixups_notify(current, instruction, regs);
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
			      regs, address);
	}

	ret = -EFAULT;
	switch (instruction & 0xF000) {
	case 0x0000:
		if (instruction == 0x000B) {
			/* rts */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0)
				regs->pc = regs->pr;
		} else if ((instruction & 0x00FF) == 0x0023) {
			/* braf Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0)
				regs->pc += rm + 4;
		} else if ((instruction & 0x00FF) == 0x0003) {
			/* bsrf Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		} else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction & 0x00FF) == 0x002B) {
			/* jmp @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0)
				regs->pc = rm;
		} else if ((instruction & 0x00FF) == 0x000B) {
			/* jsr @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		} else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction & 0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delay slot */
			ret = 0;
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				/* SR.T set means bf/s is not taken */
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delay slot */
			ret = 0;
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret == 0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				/* SR.T clear means bt/s is not taken */
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0x9000: /* mov.w @(disp,Rm),Rn */
		goto simple;

	case 0xA000: /* bra label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret == 0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret == 0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;

	case 0xD000: /* mov.l @(disp,Rm),Rn */
		goto simple;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction, regs, ma);
	if (ret == 0)
		regs->pc += instruction_size(instruction);
	return ret;
}
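
/*
 * Illustrative walk-through: a misaligned "mov.l @r3,r5" (0x6532) that
 * is not in a delay slot takes the "simple" path above: the access is
 * emulated byte-wise through the mem_access ops and PC then advances by
 * the 2-byte instruction size.
 */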

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address errors
 * and data address errors caused by read accesses.
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	insn_size_t instruction;
	int tmp;

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	error_code = lookup_exception_vector();
#endif

	if (user_mode(regs)) {
		int si_code = BUS_ADRERR;
		unsigned int user_action;

		local_irq_enable();
		inc_unaligned_user_access();

		if (copy_from_user(&instruction, (insn_size_t __user *)(regs->pc & ~1),
				   sizeof(instruction))) {
			goto uspace_segv;
		}

		/* shout about userspace fixups */
		unaligned_fixups_notify(current, instruction, regs);

		user_action = unaligned_user_action();
		if (user_action & UM_FIXUP)
			goto fixup;
		if (user_action & UM_SIGNAL)
			goto uspace_segv;

		/* ignore */
		regs->pc += instruction_size(instruction);
		return;

fixup:
		/* bad PC is not something we can fix */
		if (regs->pc & 1) {
			si_code = BUS_ADRALN;
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs,
					      &user_mem_access, 0,
					      address);

		if (tmp == 0)
			return; /* sorted */
uspace_segv:
		pr_notice("Sending SIGBUS to \"%s\" due to unaligned access (PC %lx PR %lx)\n",
			  current->comm, regs->pc, regs->pr);

		force_sig_fault(SIGBUS, si_code, (void __user *)address);
	} else {
		inc_unaligned_kernel_access();

		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc),
					     sizeof(instruction))) {
			/*
			 * Argh. Fault on the instruction itself.
			 * This should never happen on non-SMP systems.
			 */
			die("insn faulting in do_address_error", regs, 0);
		}

		unaligned_fixups_notify(current, instruction, regs);

		handle_unaligned_access(instruction, regs, &kernel_mem_access,
					0, address);
	}
}

#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
static int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst = 0;

	/*
	 * Safeguard in case DSP mode is already enabled or we're
	 * lacking the DSP altogether.
	 */
	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short __user *)regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
static inline int is_dsp_inst(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SH_DSP */
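
/*
 * Note on is_dsp_inst(): after masking with 0xf000, the 0xf000 match
 * covers the DSP extension opcodes (0xFxxx), while the 0x4000 match
 * covers the 0x4xxx group, which contains (among others) the lds/sts
 * forms used to access the DSP registers.
 */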

#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4)
{
	int code;

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		code = FPE_INTOVF;
		break;
	default:
		/* Let gcc know unhandled cases don't make it past here */
		return;
	}
	force_sig_fault(SIGFPE, code, NULL);
}
#endif

asmlinkage void do_reserved_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long error_code;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short __user *)regs->pc);

	err = do_fpu_inst(inst, regs);
	if (!err) {
		regs->pc += instruction_size(inst);
		return;
	}
	/* not an FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs->sr |= SR_DSP;
		/* Save DSP mode */
		current->thread.dsp_status.status |= SR_DSP;
		return;
	}
#endif

	error_code = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("reserved instruction", regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn+4;
	 * bsrf:0x03: PC+=Rn+4   after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
	if (((inst & 0xf000) == 0xb000)  ||	/* bsr */
	    ((inst & 0xf0ff) == 0x0003)  ||	/* bsrf */
	    ((inst & 0xf0ff) == 0x400b))	/* jsr */
		regs->pr = regs->pc + 4;

	if ((inst & 0xfd00) == 0x8d00) {	/* bfs, bts */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {	/* bra, bsr */
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {	/* braf, bsrf */
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {	/* jmp, jsr */
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {	/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	return 1;
}
#endif
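
/*
 * Illustrative example for emulate_branch(): "jsr @r3" is 0x430b. The
 * 0xf0ff mask matches 0x400b, so PR is first set to the return address,
 * and the 0xf0df mask then matches as well, so PC is set to
 * regs->regs[3].
 */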

asmlinkage void do_illegal_slot_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long inst;

	if (kprobe_handle_illslot(regs->pc) == 0)
		return;

#ifdef CONFIG_SH_FPU_EMU
	/* fetch the instruction at PC + 2 (the delay slot) */
	get_user(inst, (unsigned short __user *)regs->pc + 1);
	if (!do_fpu_inst(inst, regs)) {
		get_user(inst, (unsigned short __user *)regs->pc);
		if (!emulate_branch(inst, regs))
			return;
		/* fault in branch. */
	}
	/* not an FPU inst. */
#endif

	inst = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(void)
{
	long ex;

	ex = lookup_exception_vector();
	die_if_kernel("exception", current_pt_regs(), ex);
}

void per_cpu_trap_init(void)
{
	extern void *vbr_base;

	/*
	 * NOTE: The VBR value should be in P1 (or P2, the virtual
	 * "fixed" address space). It should definitely not be a
	 * physical address.
	 */
	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");

	/* disable exception blocking now that the VBR has been set up */
	clear_bl_bit();
}

void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}

void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if (defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU)) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
	set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}