xref: /linux/arch/s390/kernel/traps.c (revision 757dea93e136b219af09d3cd56a81063fdbdef1a)
1 /*
2  *  arch/s390/kernel/traps.c
3  *
4  *  S390 version
5  *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7  *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8  *
9  *  Derived from "arch/i386/kernel/traps.c"
10  *    Copyright (C) 1991, 1992 Linus Torvalds
11  */
12 
13 /*
14  * 'Traps.c' handles hardware traps and faults after we have saved some
15  * state in 'entry.S'.
16  */
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/ptrace.h>
22 #include <linux/timer.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/smp_lock.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/module.h>
30 #include <linux/kdebug.h>
31 #include <linux/kallsyms.h>
32 #include <linux/reboot.h>
33 #include <linux/kprobes.h>
34 #include <linux/bug.h>
35 #include <asm/system.h>
36 #include <asm/uaccess.h>
37 #include <asm/io.h>
38 #include <asm/atomic.h>
39 #include <asm/mathemu.h>
40 #include <asm/cpcmd.h>
41 #include <asm/s390_ext.h>
42 #include <asm/lowcore.h>
43 #include <asm/debug.h>
44 
45 /* Called from entry.S only */
46 extern void handle_per_exception(struct pt_regs *regs);
47 
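/*
 * Table of program check handlers, indexed by the program interruption
 * code (0x01..0x7f) stored in the lowcore; the program check handler in
 * entry.S dispatches through this table.
 */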
48 typedef void pgm_check_handler_t(struct pt_regs *, long);
49 pgm_check_handler_t *pgm_check_table[128];
50 
51 #ifdef CONFIG_SYSCTL
52 #ifdef CONFIG_PROCESS_DEBUG
53 int sysctl_userprocess_debug = 1;
54 #else
55 int sysctl_userprocess_debug = 0;
56 #endif
57 #endif
58 
59 extern pgm_check_handler_t do_protection_exception;
60 extern pgm_check_handler_t do_dat_exception;
61 extern pgm_check_handler_t do_monitor_call;
62 
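/*
 * Current stack pointer, read from general register 15
 * (the stack register in the s390 ABI).
 */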
63 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
64 
65 #ifndef CONFIG_64BIT
66 #define FOURLONG "%08lx %08lx %08lx %08lx\n"
67 static int kstack_depth_to_print = 12;
68 #else /* CONFIG_64BIT */
69 #define FOURLONG "%016lx %016lx %016lx %016lx\n"
70 static int kstack_depth_to_print = 20;
71 #endif /* CONFIG_64BIT */
72 
73 /*
74  * For show_trace we have three different stacks to consider:
75  *   - the panic stack which is used if the kernel stack has overflowed
76  *   - the asynchronous interrupt stack (cpu related)
77  *   - the synchronous kernel stack (process related)
78  * The stack trace can start at any of the three stacks and can potentially
79  * touch all of them. The order is: panic stack, async stack, sync stack.
80  */
81 static unsigned long
82 __show_trace(unsigned long sp, unsigned long low, unsigned long high)
83 {
84 	struct stack_frame *sf;
85 	struct pt_regs *regs;
86 
87 	while (1) {
88 		sp = sp & PSW_ADDR_INSN;
89 		if (sp < low || sp > high - sizeof(*sf))
90 			return sp;
91 		sf = (struct stack_frame *) sp;
92 		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
93 		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
94 		/* Follow the backchain. */
95 		while (1) {
96 			low = sp;
97 			sp = sf->back_chain & PSW_ADDR_INSN;
98 			if (!sp)
99 				break;
100 			if (sp <= low || sp > high - sizeof(*sf))
101 				return sp;
102 			sf = (struct stack_frame *) sp;
103 			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
104 			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
105 		}
106 		/* Zero backchain detected, check for interrupt frame. */
107 		sp = (unsigned long) (sf + 1);
108 		if (sp <= low || sp > high - sizeof(*regs))
109 			return sp;
110 		regs = (struct pt_regs *) sp;
111 		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
112 		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
113 		low = sp;
114 		sp = regs->gprs[15];
115 	}
116 }
117 
118 void show_trace(struct task_struct *task, unsigned long *stack)
119 {
120 	register unsigned long __r15 asm ("15");
121 	unsigned long sp;
122 
123 	sp = (unsigned long) stack;
124 	if (!sp)
125 		sp = task ? task->thread.ksp : __r15;
126 	printk("Call Trace:\n");
127 #ifdef CONFIG_CHECK_STACK
128 	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
129 			  S390_lowcore.panic_stack);
130 #endif
131 	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
132 			  S390_lowcore.async_stack);
133 	if (task)
134 		__show_trace(sp, (unsigned long) task_stack_page(task),
135 			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
136 	else
137 		__show_trace(sp, S390_lowcore.thread_info,
138 			     S390_lowcore.thread_info + THREAD_SIZE);
139 	printk("\n");
140 	if (!task)
141 		task = current;
142 	debug_show_held_locks(task);
143 }
144 
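/*
 * Print up to kstack_depth_to_print words of the raw stack contents
 * (stopping at a THREAD_SIZE boundary), then the call trace.
 */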
145 void show_stack(struct task_struct *task, unsigned long *sp)
146 {
147 	register unsigned long * __r15 asm ("15");
148 	unsigned long *stack;
149 	int i;
150 
151 	if (!sp)
152 		stack = task ? (unsigned long *) task->thread.ksp : __r15;
153 	else
154 		stack = sp;
155 
156 	for (i = 0; i < kstack_depth_to_print; i++) {
157 		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
158 			break;
159 		if (i && ((i * sizeof (long) % 32) == 0))
160 			printk("\n       ");
161 		printk("%p ", (void *)*stack++);
162 	}
163 	printk("\n");
164 	show_trace(task, sp);
165 }
166 
167 /*
168  * The architecture-independent dump_stack generator
169  */
170 void dump_stack(void)
171 {
172 	show_stack(NULL, NULL);
173 }
174 
175 EXPORT_SYMBOL(dump_stack);
176 
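/*
 * Extract the PSW field selected by "bits": the divisor is the lowest
 * set bit of the mask, so the division shifts the field down to bit 0.
 */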
177 static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
178 {
179 	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
180 }
181 
182 void show_registers(struct pt_regs *regs)
183 {
184 	char *mode;
185 
186 	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
187 	printk("%s PSW : %p %p",
188 	       mode, (void *) regs->psw.mask,
189 	       (void *) regs->psw.addr);
190 	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
191 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
192 	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
193 	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
194 	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
195 	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
196 	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
197 	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
198 #ifdef CONFIG_64BIT
199 	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
200 #endif
201 	printk("\n%s GPRS: " FOURLONG, mode,
202 	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
203 	printk("           " FOURLONG,
204 	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
205 	printk("           " FOURLONG,
206 	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
207 	printk("           " FOURLONG,
208 	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
209 
210 	show_code(regs);
211 }
212 
213 /* This is called from fs/proc/array.c */
214 char *task_show_regs(struct task_struct *task, char *buffer)
215 {
216 	struct pt_regs *regs;
217 
218 	regs = task_pt_regs(task);
219 	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
220 		       task, (void *)task->thread.ksp);
221 	buffer += sprintf(buffer, "User PSW : %p %p\n",
222 		       (void *) regs->psw.mask, (void *)regs->psw.addr);
223 
224 	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
225 			  regs->gprs[0], regs->gprs[1],
226 			  regs->gprs[2], regs->gprs[3]);
227 	buffer += sprintf(buffer, "           " FOURLONG,
228 			  regs->gprs[4], regs->gprs[5],
229 			  regs->gprs[6], regs->gprs[7]);
230 	buffer += sprintf(buffer, "           " FOURLONG,
231 			  regs->gprs[8], regs->gprs[9],
232 			  regs->gprs[10], regs->gprs[11]);
233 	buffer += sprintf(buffer, "           " FOURLONG,
234 			  regs->gprs[12], regs->gprs[13],
235 			  regs->gprs[14], regs->gprs[15]);
236 	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
237 			  task->thread.acrs[0], task->thread.acrs[1],
238 			  task->thread.acrs[2], task->thread.acrs[3]);
239 	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
240 			  task->thread.acrs[4], task->thread.acrs[5],
241 			  task->thread.acrs[6], task->thread.acrs[7]);
242 	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
243 			  task->thread.acrs[8], task->thread.acrs[9],
244 			  task->thread.acrs[10], task->thread.acrs[11]);
245 	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
246 			  task->thread.acrs[12], task->thread.acrs[13],
247 			  task->thread.acrs[14], task->thread.acrs[15]);
248 	return buffer;
249 }
250 
251 static DEFINE_SPINLOCK(die_lock);
252 
253 void die(const char * str, struct pt_regs * regs, long err)
254 {
255 	static int die_counter;
256 
257 	debug_stop_all();
258 	console_verbose();
259 	spin_lock_irq(&die_lock);
260 	bust_spinlocks(1);
261 	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
262         show_regs(regs);
263 	bust_spinlocks(0);
264         spin_unlock_irq(&die_lock);
265 	if (in_interrupt())
266 		panic("Fatal exception in interrupt");
267 	if (panic_on_oops)
268 		panic("Fatal exception: panic_on_oops");
269         do_exit(SIGSEGV);
270 }
271 
272 static inline void
273 report_user_fault(long interruption_code, struct pt_regs *regs)
274 {
275 #if defined(CONFIG_SYSCTL)
276 	if (!sysctl_userprocess_debug)
277 		return;
278 #endif
279 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
280 	printk("User process fault: interruption code 0x%lX\n",
281 	       interruption_code);
282 	show_regs(regs);
283 #endif
284 }
285 
286 int is_valid_bugaddr(unsigned long addr)
287 {
288 	return 1;
289 }
290 
291 static inline void __kprobes do_trap(long interruption_code, int signr,
292 					char *str, struct pt_regs *regs,
293 					siginfo_t *info)
294 {
295 	/*
296 	 * We got all needed information from the lowcore and can
297 	 * now safely switch on interrupts.
298 	 */
299         if (regs->psw.mask & PSW_MASK_PSTATE)
300 		local_irq_enable();
301 
302 	if (notify_die(DIE_TRAP, str, regs, interruption_code,
303 				interruption_code, signr) == NOTIFY_STOP)
304 		return;
305 
306         if (regs->psw.mask & PSW_MASK_PSTATE) {
307                 struct task_struct *tsk = current;
308 
309                 tsk->thread.trap_no = interruption_code & 0xffff;
310 		force_sig_info(signr, info, tsk);
311 		report_user_fault(interruption_code, regs);
312         } else {
313                 const struct exception_table_entry *fixup;
314                 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
315                 if (fixup)
316                         regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
317 		else {
318 			enum bug_trap_type btt;
319 
320 			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
321 			if (btt == BUG_TRAP_TYPE_WARN)
322 				return;
323 			die(str, regs, interruption_code);
324 		}
325         }
326 }
327 
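/*
 * The program check old PSW points past the instruction that caused
 * the exception; back up by the instruction length code from the
 * lowcore to get the address of the faulting instruction.
 */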
328 static inline void __user *get_check_address(struct pt_regs *regs)
329 {
330 	return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
331 }
332 
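/*
 * PER single-step event: give the debug notifier chain a chance to
 * handle it, otherwise deliver SIGTRAP to a ptraced task.
 */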
333 void __kprobes do_single_step(struct pt_regs *regs)
334 {
335 	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
336 					SIGTRAP) == NOTIFY_STOP){
337 		return;
338 	}
339 	if ((current->ptrace & PT_PTRACED) != 0)
340 		force_sig(SIGTRAP, current);
341 }
342 
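/* Fallback for program interruption codes without a dedicated handler. */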
343 static void default_trap_handler(struct pt_regs * regs, long interruption_code)
344 {
345         if (regs->psw.mask & PSW_MASK_PSTATE) {
346 		local_irq_enable();
347 		report_user_fault(interruption_code, regs);
348 		do_exit(SIGSEGV);
349 	} else
350 		die("Unknown program exception", regs, interruption_code);
351 }
352 
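/*
 * Template for the simple program check handlers: fill in a siginfo
 * with the given signal and si_code and hand it to do_trap().
 */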
353 #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
354 static void name(struct pt_regs * regs, long interruption_code) \
355 { \
356         siginfo_t info; \
357         info.si_signo = signr; \
358         info.si_errno = 0; \
359         info.si_code = sicode; \
360 	info.si_addr = siaddr; \
361         do_trap(interruption_code, signr, str, regs, &info); \
362 }
363 
364 DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
365 	      ILL_ILLADR, get_check_address(regs))
366 DO_ERROR_INFO(SIGILL,  "execute exception", execute_exception,
367 	      ILL_ILLOPN, get_check_address(regs))
368 DO_ERROR_INFO(SIGFPE,  "fixpoint divide exception", divide_exception,
369 	      FPE_INTDIV, get_check_address(regs))
370 DO_ERROR_INFO(SIGFPE,  "fixpoint overflow exception", overflow_exception,
371 	      FPE_INTOVF, get_check_address(regs))
372 DO_ERROR_INFO(SIGFPE,  "HFP overflow exception", hfp_overflow_exception,
373 	      FPE_FLTOVF, get_check_address(regs))
374 DO_ERROR_INFO(SIGFPE,  "HFP underflow exception", hfp_underflow_exception,
375 	      FPE_FLTUND, get_check_address(regs))
376 DO_ERROR_INFO(SIGFPE,  "HFP significance exception", hfp_significance_exception,
377 	      FPE_FLTRES, get_check_address(regs))
378 DO_ERROR_INFO(SIGFPE,  "HFP divide exception", hfp_divide_exception,
379 	      FPE_FLTDIV, get_check_address(regs))
380 DO_ERROR_INFO(SIGFPE,  "HFP square root exception", hfp_sqrt_exception,
381 	      FPE_FLTINV, get_check_address(regs))
382 DO_ERROR_INFO(SIGILL,  "operand exception", operand_exception,
383 	      ILL_ILLOPN, get_check_address(regs))
384 DO_ERROR_INFO(SIGILL,  "privileged operation", privileged_op,
385 	      ILL_PRVOPC, get_check_address(regs))
386 DO_ERROR_INFO(SIGILL,  "special operation exception", special_op_exception,
387 	      ILL_ILLOPN, get_check_address(regs))
388 DO_ERROR_INFO(SIGILL,  "translation exception", translation_exception,
389 	      ILL_ILLOPN, get_check_address(regs))
390 
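/*
 * Translate the IEEE flags of the data exception code (DXC, byte 2 of
 * the FPC) into a SIGFPE si_code and deliver the signal via do_trap().
 */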
391 static inline void
392 do_fp_trap(struct pt_regs *regs, void __user *location,
393            int fpc, long interruption_code)
394 {
395 	siginfo_t si;
396 
397 	si.si_signo = SIGFPE;
398 	si.si_errno = 0;
399 	si.si_addr = location;
400 	si.si_code = 0;
401 	/* FPC[2] is Data Exception Code */
402 	if ((fpc & 0x00000300) == 0) {
403 		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
404 		if (fpc & 0x8000) /* invalid fp operation */
405 			si.si_code = FPE_FLTINV;
406 		else if (fpc & 0x4000) /* div by 0 */
407 			si.si_code = FPE_FLTDIV;
408 		else if (fpc & 0x2000) /* overflow */
409 			si.si_code = FPE_FLTOVF;
410 		else if (fpc & 0x1000) /* underflow */
411 			si.si_code = FPE_FLTUND;
412 		else if (fpc & 0x0800) /* inexact */
413 			si.si_code = FPE_FLTRES;
414 	}
415 	current->thread.ieee_instruction_pointer = (addr_t) location;
416 	do_trap(interruption_code, SIGFPE,
417 		"floating point exception", regs, &si);
418 }
419 
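/*
 * Program check 0x01 (operation exception): in user space this is
 * either the ptrace breakpoint or, with CONFIG_MATHEMU, a floating
 * point instruction to emulate; in kernel space it is offered to
 * kprobes before falling back to SIGILL.
 */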
420 static void illegal_op(struct pt_regs * regs, long interruption_code)
421 {
422 	siginfo_t info;
423         __u8 opcode[6];
424 	__u16 __user *location;
425 	int signal = 0;
426 
427 	location = get_check_address(regs);
428 
429 	/*
430 	 * We got all needed information from the lowcore and can
431 	 * now safely switch on interrupts.
432 	 */
433 	if (regs->psw.mask & PSW_MASK_PSTATE)
434 		local_irq_enable();
435 
436 	if (regs->psw.mask & PSW_MASK_PSTATE) {
437 		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
438 			return;
439 		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
440 			if (current->ptrace & PT_PTRACED)
441 				force_sig(SIGTRAP, current);
442 			else
443 				signal = SIGILL;
444 #ifdef CONFIG_MATHEMU
445 		} else if (opcode[0] == 0xb3) {
446 			if (get_user(*((__u16 *) (opcode+2)), location+1))
447 				return;
448 			signal = math_emu_b3(opcode, regs);
449                 } else if (opcode[0] == 0xed) {
450 			if (get_user(*((__u32 *) (opcode+2)),
451 				     (__u32 __user *)(location+1)))
452 				return;
453 			signal = math_emu_ed(opcode, regs);
454 		} else if (*((__u16 *) opcode) == 0xb299) {
455 			if (get_user(*((__u16 *) (opcode+2)), location+1))
456 				return;
457 			signal = math_emu_srnm(opcode, regs);
458 		} else if (*((__u16 *) opcode) == 0xb29c) {
459 			if (get_user(*((__u16 *) (opcode+2)), location+1))
460 				return;
461 			signal = math_emu_stfpc(opcode, regs);
462 		} else if (*((__u16 *) opcode) == 0xb29d) {
463 			if (get_user(*((__u16 *) (opcode+2)), location+1))
464 				return;
465 			signal = math_emu_lfpc(opcode, regs);
466 #endif
467 		} else
468 			signal = SIGILL;
469 	} else {
470 		/*
471 		 * If we get an illegal op in kernel mode, send it through the
472 		 * kprobes notifier. If kprobes doesn't pick it up, fall back to SIGILL.
473 		 */
474 		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
475 			       3, SIGTRAP) != NOTIFY_STOP)
476 			signal = SIGILL;
477 	}
478 
479 #ifdef CONFIG_MATHEMU
480         if (signal == SIGFPE)
481 		do_fp_trap(regs, location,
482                            current->thread.fp_regs.fpc, interruption_code);
483         else if (signal == SIGSEGV) {
484 		info.si_signo = signal;
485 		info.si_errno = 0;
486 		info.si_code = SEGV_MAPERR;
487 		info.si_addr = (void __user *) location;
488 		do_trap(interruption_code, signal,
489 			"user address fault", regs, &info);
490 	} else
491 #endif
492         if (signal) {
493 		info.si_signo = signal;
494 		info.si_errno = 0;
495 		info.si_code = ILL_ILLOPC;
496 		info.si_addr = (void __user *) location;
497 		do_trap(interruption_code, signal,
498 			"illegal operation", regs, &info);
499 	}
500 }
501 
502 
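/*
 * Program check 0x06 (specification exception): with CONFIG_MATHEMU a
 * user space floating point instruction may be emulated, otherwise the
 * generic DO_ERROR_INFO variant below reports SIGILL.
 */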
503 #ifdef CONFIG_MATHEMU
504 asmlinkage void
505 specification_exception(struct pt_regs * regs, long interruption_code)
506 {
507         __u8 opcode[6];
508 	__u16 __user *location = NULL;
509 	int signal = 0;
510 
511 	location = (__u16 __user *) get_check_address(regs);
512 
513 	/*
514 	 * We got all needed information from the lowcore and can
515 	 * now safely switch on interrupts.
516 	 */
517         if (regs->psw.mask & PSW_MASK_PSTATE)
518 		local_irq_enable();
519 
520         if (regs->psw.mask & PSW_MASK_PSTATE) {
521 		get_user(*((__u16 *) opcode), location);
522 		switch (opcode[0]) {
523 		case 0x28: /* LDR Rx,Ry   */
524 			signal = math_emu_ldr(opcode);
525 			break;
526 		case 0x38: /* LER Rx,Ry   */
527 			signal = math_emu_ler(opcode);
528 			break;
529 		case 0x60: /* STD R,D(X,B) */
530 			get_user(*((__u16 *) (opcode+2)), location+1);
531 			signal = math_emu_std(opcode, regs);
532 			break;
533 		case 0x68: /* LD R,D(X,B) */
534 			get_user(*((__u16 *) (opcode+2)), location+1);
535 			signal = math_emu_ld(opcode, regs);
536 			break;
537 		case 0x70: /* STE R,D(X,B) */
538 			get_user(*((__u16 *) (opcode+2)), location+1);
539 			signal = math_emu_ste(opcode, regs);
540 			break;
541 		case 0x78: /* LE R,D(X,B) */
542 			get_user(*((__u16 *) (opcode+2)), location+1);
543 			signal = math_emu_le(opcode, regs);
544 			break;
545 		default:
546 			signal = SIGILL;
547 			break;
548                 }
549         } else
550 		signal = SIGILL;
551 
552         if (signal == SIGFPE)
553 		do_fp_trap(regs, location,
554                            current->thread.fp_regs.fpc, interruption_code);
555         else if (signal) {
556 		siginfo_t info;
557 		info.si_signo = signal;
558 		info.si_errno = 0;
559 		info.si_code = ILL_ILLOPN;
560 		info.si_addr = location;
561 		do_trap(interruption_code, signal,
562 			"specification exception", regs, &info);
563 	}
564 }
565 #else
566 DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
567 	      ILL_ILLOPN, get_check_address(regs));
568 #endif
569 
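/*
 * Program check 0x07 (data exception): on machines with IEEE floating
 * point the FPC including the DXC is stored and the fault is reported
 * as SIGFPE; otherwise CONFIG_MATHEMU may emulate the instruction.
 */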
570 static void data_exception(struct pt_regs * regs, long interruption_code)
571 {
572 	__u16 __user *location;
573 	int signal = 0;
574 
575 	location = get_check_address(regs);
576 
577 	/*
578 	 * We got all needed information from the lowcore and can
579 	 * now safely switch on interrupts.
580 	 */
581 	if (regs->psw.mask & PSW_MASK_PSTATE)
582 		local_irq_enable();
583 
584 	if (MACHINE_HAS_IEEE)
585 		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
586 
587 #ifdef CONFIG_MATHEMU
588         else if (regs->psw.mask & PSW_MASK_PSTATE) {
589         	__u8 opcode[6];
590 		get_user(*((__u16 *) opcode), location);
591 		switch (opcode[0]) {
592 		case 0x28: /* LDR Rx,Ry   */
593 			signal = math_emu_ldr(opcode);
594 			break;
595 		case 0x38: /* LER Rx,Ry   */
596 			signal = math_emu_ler(opcode);
597 			break;
598 		case 0x60: /* STD R,D(X,B) */
599 			get_user(*((__u16 *) (opcode+2)), location+1);
600 			signal = math_emu_std(opcode, regs);
601 			break;
602 		case 0x68: /* LD R,D(X,B) */
603 			get_user(*((__u16 *) (opcode+2)), location+1);
604 			signal = math_emu_ld(opcode, regs);
605 			break;
606 		case 0x70: /* STE R,D(X,B) */
607 			get_user(*((__u16 *) (opcode+2)), location+1);
608 			signal = math_emu_ste(opcode, regs);
609 			break;
610 		case 0x78: /* LE R,D(X,B) */
611 			get_user(*((__u16 *) (opcode+2)), location+1);
612 			signal = math_emu_le(opcode, regs);
613 			break;
614 		case 0xb3:
615 			get_user(*((__u16 *) (opcode+2)), location+1);
616 			signal = math_emu_b3(opcode, regs);
617 			break;
618                 case 0xed:
619 			get_user(*((__u32 *) (opcode+2)),
620 				 (__u32 __user *)(location+1));
621 			signal = math_emu_ed(opcode, regs);
622 			break;
623 	        case 0xb2:
624 			if (opcode[1] == 0x99) {
625 				get_user(*((__u16 *) (opcode+2)), location+1);
626 				signal = math_emu_srnm(opcode, regs);
627 			} else if (opcode[1] == 0x9c) {
628 				get_user(*((__u16 *) (opcode+2)), location+1);
629 				signal = math_emu_stfpc(opcode, regs);
630 			} else if (opcode[1] == 0x9d) {
631 				get_user(*((__u16 *) (opcode+2)), location+1);
632 				signal = math_emu_lfpc(opcode, regs);
633 			} else
634 				signal = SIGILL;
635 			break;
636 		default:
637 			signal = SIGILL;
638 			break;
639                 }
640         }
641 #endif
642 	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
643 		signal = SIGFPE;
644 	else
645 		signal = SIGILL;
646         if (signal == SIGFPE)
647 		do_fp_trap(regs, location,
648                            current->thread.fp_regs.fpc, interruption_code);
649         else if (signal) {
650 		siginfo_t info;
651 		info.si_signo = signal;
652 		info.si_errno = 0;
653 		info.si_code = ILL_ILLOPN;
654 		info.si_addr = location;
655 		do_trap(interruption_code, signal,
656 			"data exception", regs, &info);
657 	}
658 }
659 
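/*
 * Program check 0x1C (space-switch event): put the user PSW back into
 * home-space mode and report the event as SIGILL.
 */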
660 static void space_switch_exception(struct pt_regs * regs, long int_code)
661 {
662         siginfo_t info;
663 
664 	/* Set user psw back to home space mode. */
665 	if (regs->psw.mask & PSW_MASK_PSTATE)
666 		regs->psw.mask |= PSW_ASC_HOME;
667 	/* Send SIGILL. */
668         info.si_signo = SIGILL;
669         info.si_errno = 0;
670         info.si_code = ILL_PRVOPC;
671         info.si_addr = get_check_address(regs);
672         do_trap(int_code, SIGILL, "space switch event", regs, &info);
673 }
674 
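/*
 * Called from entry.S when CONFIG_CHECK_STACK detects a kernel stack
 * overflow; the stack is corrupt, so all we can do is panic.
 */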
675 asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
676 {
677 	bust_spinlocks(1);
678 	printk("Kernel stack overflow.\n");
679 	show_regs(regs);
680 	bust_spinlocks(0);
681 	panic("Corrupt kernel stack, can't continue.");
682 }
683 
684 /* init is done in lowcore.S and head.S */
685 
686 void __init trap_init(void)
687 {
688         int i;
689 
690         for (i = 0; i < 128; i++)
691           pgm_check_table[i] = &default_trap_handler;
692         pgm_check_table[1] = &illegal_op;
693         pgm_check_table[2] = &privileged_op;
694         pgm_check_table[3] = &execute_exception;
695         pgm_check_table[4] = &do_protection_exception;
696         pgm_check_table[5] = &addressing_exception;
697         pgm_check_table[6] = &specification_exception;
698         pgm_check_table[7] = &data_exception;
699         pgm_check_table[8] = &overflow_exception;
700         pgm_check_table[9] = &divide_exception;
701         pgm_check_table[0x0A] = &overflow_exception;
702         pgm_check_table[0x0B] = &divide_exception;
703         pgm_check_table[0x0C] = &hfp_overflow_exception;
704         pgm_check_table[0x0D] = &hfp_underflow_exception;
705         pgm_check_table[0x0E] = &hfp_significance_exception;
706         pgm_check_table[0x0F] = &hfp_divide_exception;
707         pgm_check_table[0x10] = &do_dat_exception;
708         pgm_check_table[0x11] = &do_dat_exception;
709         pgm_check_table[0x12] = &translation_exception;
710         pgm_check_table[0x13] = &special_op_exception;
711 #ifdef CONFIG_64BIT
712         pgm_check_table[0x38] = &do_dat_exception;
713 	pgm_check_table[0x39] = &do_dat_exception;
714 	pgm_check_table[0x3A] = &do_dat_exception;
715         pgm_check_table[0x3B] = &do_dat_exception;
716 #endif /* CONFIG_64BIT */
717         pgm_check_table[0x15] = &operand_exception;
718         pgm_check_table[0x1C] = &space_switch_exception;
719         pgm_check_table[0x1D] = &hfp_sqrt_exception;
720 	pgm_check_table[0x40] = &do_monitor_call;
721 	pfault_irq_init();
722 }
723