xref: /linux/arch/parisc/kernel/traps.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  *  linux/arch/parisc/kernel/traps.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
6  */
7 
8 /*
9  * 'Traps.c' handles hardware traps and faults after we have saved some
10  * state in 'asm.s'.
11  */
12 
13 #include <linux/config.h>
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/ptrace.h>
19 #include <linux/timer.h>
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/spinlock.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/console.h>
28 #include <linux/kallsyms.h>
29 
30 #include <asm/assembly.h>
31 #include <asm/system.h>
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35 #include <asm/traps.h>
36 #include <asm/unaligned.h>
37 #include <asm/atomic.h>
38 #include <asm/smp.h>
39 #include <asm/pdc.h>
40 #include <asm/pdc_chassis.h>
41 #include <asm/unwind.h>
42 
43 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
44 
45 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
46 			  /*  dumped to the console via printk)          */
47 
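/* Lock used to serialize the software update of the PTE dirty (D) bit
 * done in the low-level trap handlers; only needed on SMP or when
 * spinlock debugging is enabled. */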
48 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
49 DEFINE_SPINLOCK(pa_dbit_lock);
50 #endif
51 
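/* Render the low 'nbits' bits of 'x' into 'buf' as a NUL-terminated
 * string of '0'/'1' characters, most significant bit first.  Returns
 * the number of characters written (nbits). */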
52 int printbinary(char *buf, unsigned long x, int nbits)
53 {
54 	unsigned long mask = 1UL << (nbits - 1);
55 	while (mask != 0) {
56 		*buf++ = (mask & x ? '1' : '0');
57 		mask >>= 1;
58 	}
59 	*buf = '\0';
60 
61 	return nbits;
62 }
63 
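/* printk format used for registers: 16 hex digits on 64-bit kernels,
 * 8 on 32-bit kernels. */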
64 #ifdef __LP64__
65 #define RFMT "%016lx"
66 #else
67 #define RFMT "%08lx"
68 #endif
69 
70 void show_regs(struct pt_regs *regs)
71 {
72 	int i;
73 	char buf[128], *p;
74 	char *level;
75 	unsigned long cr30;
76 	unsigned long cr31;
77 	/* carlos says that gcc handles memory in a struct better,
78 	 * and it makes our life easier with fpregs -- T-Bone */
79 	struct { u32 sw[2]; } s;
80 
81 	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
82 
83 	printk("%s\n", level); /* don't want to have that pretty register dump messed up */
84 
85 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
86 	printbinary(buf, regs->gr[0], 32);
87 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
88 
89 	for (i = 0; i < 32; i += 4) {
90 		int j;
91 		p = buf;
92 		p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
93 		for (j = 0; j < 4; j++) {
94 			p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
95 		}
96 		printk("%s\n", buf);
97 	}
98 
99 	for (i = 0; i < 8; i += 4) {
100 		int j;
101 		p = buf;
102 		p += sprintf(p, "%ssr%d-%d  ", level, i, i + 3);
103 		for (j = 0; j < 4; j++) {
104 			p += sprintf(p, " " RFMT, regs->sr[i + j]);
105 		}
106 		printk("%s\n", buf);
107 	}
108 
109 	/* FR are 64bit everywhere. Need to use asm to get the content
110 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
111 	 * in our way, otherwise we're screwed.
112 	 * The fldd is used to restore the T-bit if there was one, as the
113 	 * store clears it anyway.
114 	 * BTW, PA2.0 book says "thou shall not use fstw on FPSR/FPERs". */
115 	__asm__ (
116 		"fstd %%fr0,0(%1)	\n\t"
117 		"fldd 0(%1),%%fr0	\n\t"
118 		: "=m" (s) : "r" (&s) : "%r0"
119 		);
120 
121 	printk("%s\n", level);
122 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
123 	printbinary(buf, s.sw[0], 32);
124 	printk("%sFPSR: %s\n", level, buf);
125 	printk("%sFPER1: %08x\n", level, s.sw[1]);
126 
127 	/* here we'll print fr0 again, though it'll be meaningless */
128 	for (i = 0; i < 32; i += 4) {
129 		int j;
130 		p = buf;
131 		p += sprintf(p, "%sfr%02d-%02d ", level, i, i + 3);
132 		for (j = 0; j < 4; j++)
133 			p += sprintf(p, " %016llx", (i+j) == 0 ? 0 : regs->fr[i+j]);
134 		printk("%s\n", buf);
135 	}
136 
137 	cr30 = mfctl(30);
138 	cr31 = mfctl(31);
139 	printk("%s\n", level);
140 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
141 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
142 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
143 	       level, regs->iir, regs->isr, regs->ior);
144 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
145 	       level, current_thread_info()->cpu, cr30, cr31);
146 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
147 	printk(level);
148 	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
149 	printk(level);
150 	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
151 	printk(level);
152 	print_symbol(" RP(r2): %s\n", regs->gr[2]);
153 }
154 
155 
156 void dump_stack(void)
157 {
158 	show_stack(NULL, NULL);
159 }
160 
161 EXPORT_SYMBOL(dump_stack);
162 
163 static void do_show_stack(struct unwind_frame_info *info)
164 {
165 	int i = 1;
166 
167 	printk("Backtrace:\n");
168 	while (i <= 16) {
169 		if (unwind_once(info) < 0 || info->ip == 0)
170 			break;
171 
172 		if (__kernel_text_address(info->ip)) {
173 			printk(" [<" RFMT ">] ", info->ip);
174 #ifdef CONFIG_KALLSYMS
175 			print_symbol("%s\n", info->ip);
176 #else
177 			if ((i & 0x03) == 0)
178 				printk("\n");
179 #endif
180 			i++;
181 		}
182 	}
183 	printk("\n");
184 }
185 
186 void show_stack(struct task_struct *task, unsigned long *s)
187 {
188 	struct unwind_frame_info info;
189 
190 	if (!task) {
191 		unsigned long sp;
192 		struct pt_regs *r;
193 
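		/* No task given: build a throw-away pt_regs describing this
		 * very spot (current stack pointer, the HERE label as the
		 * instruction address, our return address in r2) so the
		 * unwinder can be started from the running context. */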
194 HERE:
195 		asm volatile ("copy %%r30, %0" : "=r"(sp));
196 		r = kzalloc(sizeof(struct pt_regs), GFP_KERNEL);
197 		if (!r)
198 			return;
199 		r->iaoq[0] = (unsigned long)&&HERE;
200 		r->gr[2] = (unsigned long)__builtin_return_address(0);
201 		r->gr[30] = sp;
202 		unwind_frame_init(&info, current, r);
203 		kfree(r);
204 	} else {
205 		unwind_frame_init_from_blocked_task(&info, task);
206 	}
207 
208 	do_show_stack(&info);
209 }
210 
211 void die_if_kernel(char *str, struct pt_regs *regs, long err)
212 {
213 	if (user_mode(regs)) {
214 		if (err == 0)
215 			return; /* STFU */
216 
217 		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
218 			current->comm, current->pid, str, err, regs->iaoq[0]);
219 #ifdef PRINT_USER_FAULTS
220 		/* XXX for debugging only */
221 		show_regs(regs);
222 #endif
223 		return;
224 	}
225 
226 	oops_in_progress = 1;
227 
228 	/* Amuse the user in a SPARC fashion */
229 	printk(
230 "      _______________________________ \n"
231 "     < Your System ate a SPARC! Gah! >\n"
232 "      ------------------------------- \n"
233 "             \\   ^__^\n"
234 "              \\  (xx)\\_______\n"
235 "                 (__)\\       )\\/\\\n"
236 "                  U  ||----w |\n"
237 "                     ||     ||\n");
238 
239 	/* unlock the pdc lock if necessary */
240 	pdc_emergency_unlock();
241 
242 	/* maybe the kernel hasn't booted very far yet and hasn't been able
243 	 * to initialize the serial or STI console. In that case we should
244 	 * re-enable the pdc console, so that the user will be able to
245 	 * identify the problem. */
246 	if (!console_drivers)
247 		pdc_console_restart();
248 
249 	printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
250 		current->comm, current->pid, str, err);
251 	show_regs(regs);
252 
253 	/* Wot's wrong wif bein' racy? */
254 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
255 		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
256 		local_irq_enable();
257 		while (1);
258 	}
259 
260 	current->thread.flags |= PARISC_KERNEL_DEATH;
261 	do_exit(SIGSEGV);
262 }
263 
264 int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
265 {
266 	return syscall(regs);
267 }
268 
269 /* gdb uses break 4,8 */
270 #define GDB_BREAK_INSN 0x10004
271 void handle_gdb_break(struct pt_regs *regs, int wot)
272 {
273 	struct siginfo si;
274 
275 	si.si_code = wot;
276 	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
277 	si.si_signo = SIGTRAP;
278 	si.si_errno = 0;
279 	force_sig_info(SIGTRAP, &si, current);
280 }
281 
282 void handle_break(unsigned iir, struct pt_regs *regs)
283 {
284 	struct siginfo si;
285 
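	/* The IIR holds the trapping break instruction word: "break 0,0"
	 * encodes to 0, gdb's "break 4,8" to 0x10004 (see above). */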
286 	switch(iir) {
287 	case 0x00:
288 #ifdef PRINT_USER_FAULTS
289 		printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
290 		       current->pid, current->comm);
291 #endif
292 		die_if_kernel("Breakpoint", regs, 0);
293 #ifdef PRINT_USER_FAULTS
294 		show_regs(regs);
295 #endif
296 		si.si_code = TRAP_BRKPT;
297 		si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
298 		si.si_signo = SIGTRAP;
299 		force_sig_info(SIGTRAP, &si, current);
300 		break;
301 
302 	case GDB_BREAK_INSN:
303 		die_if_kernel("Breakpoint", regs, 0);
304 		handle_gdb_break(regs, TRAP_BRKPT);
305 		break;
306 
307 	default:
308 #ifdef PRINT_USER_FAULTS
309 		printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
310 		       iir, current->pid, current->comm);
311 		show_regs(regs);
312 #endif
313 		si.si_signo = SIGTRAP;
314 		si.si_code = TRAP_BRKPT;
315 		si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
316 		force_sig_info(SIGTRAP, &si, current);
317 		return;
318 	}
319 }
320 
321 
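/* Transfer Of Control (TOC): for now just report that it happened. */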
322 int handle_toc(void)
323 {
324 	printk(KERN_CRIT "TOC call.\n");
325 	return 0;
326 }
327 
328 static void default_trap(int code, struct pt_regs *regs)
329 {
330 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
331 	show_regs(regs);
332 }
333 
334 void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
335 
336 
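/* Rebuild a pt_regs from the PIM (processor internal memory) image that
 * firmware saved when the HPMC was taken, using the wide (PA2.0) or
 * narrow (PA1.x) PIM layout depending on the CPU type. */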
337 void transfer_pim_to_trap_frame(struct pt_regs *regs)
338 {
339     register int i;
340     extern unsigned int hpmc_pim_data[];
341     struct pdc_hpmc_pim_11 *pim_narrow;
342     struct pdc_hpmc_pim_20 *pim_wide;
343 
344     if (boot_cpu_data.cpu_type >= pcxu) {
345 
346 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
347 
348 	/*
349 	 * Note: The following code will probably generate a
350 	 * bunch of truncation error warnings from the compiler.
351 	 * Could be handled with an ifdef, but perhaps there
352 	 * is a better way.
353 	 */
354 
355 	regs->gr[0] = pim_wide->cr[22];
356 
357 	for (i = 1; i < 32; i++)
358 	    regs->gr[i] = pim_wide->gr[i];
359 
360 	for (i = 0; i < 32; i++)
361 	    regs->fr[i] = pim_wide->fr[i];
362 
363 	for (i = 0; i < 8; i++)
364 	    regs->sr[i] = pim_wide->sr[i];
365 
366 	regs->iasq[0] = pim_wide->cr[17];
367 	regs->iasq[1] = pim_wide->iasq_back;
368 	regs->iaoq[0] = pim_wide->cr[18];
369 	regs->iaoq[1] = pim_wide->iaoq_back;
370 
371 	regs->sar  = pim_wide->cr[11];
372 	regs->iir  = pim_wide->cr[19];
373 	regs->isr  = pim_wide->cr[20];
374 	regs->ior  = pim_wide->cr[21];
375     }
376     else {
377 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
378 
379 	regs->gr[0] = pim_narrow->cr[22];
380 
381 	for (i = 1; i < 32; i++)
382 	    regs->gr[i] = pim_narrow->gr[i];
383 
384 	for (i = 0; i < 32; i++)
385 	    regs->fr[i] = pim_narrow->fr[i];
386 
387 	for (i = 0; i < 8; i++)
388 	    regs->sr[i] = pim_narrow->sr[i];
389 
390 	regs->iasq[0] = pim_narrow->cr[17];
391 	regs->iasq[1] = pim_narrow->iasq_back;
392 	regs->iaoq[0] = pim_narrow->cr[18];
393 	regs->iaoq[1] = pim_narrow->iaoq_back;
394 
395 	regs->sar  = pim_narrow->cr[11];
396 	regs->iir  = pim_narrow->cr[19];
397 	regs->isr  = pim_narrow->cr[20];
398 	regs->ior  = pim_narrow->cr[21];
399     }
400 
401     /*
402      * The following fields only have meaning if we came through
403      * another path. So just zero them here.
404      */
405 
406     regs->ksp = 0;
407     regs->kpc = 0;
408     regs->orig_r28 = 0;
409 }
410 
411 
412 /*
413  * This routine is called as a last resort when everything else
414  * has gone clearly wrong. We get called for faults in kernel space,
415  * and HPMCs.
416  */
417 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
418 {
419 	static DEFINE_SPINLOCK(terminate_lock);
420 
421 	oops_in_progress = 1;
422 
423 	set_eiem(0);
424 	local_irq_disable();
425 	spin_lock(&terminate_lock);
426 
427 	/* unlock the pdc lock if necessary */
428 	pdc_emergency_unlock();
429 
430 	/* restart pdc console if necessary */
431 	if (!console_drivers)
432 		pdc_console_restart();
433 
434 	/* Not all paths will gutter the processor... */
435 	switch(code){
436 
437 	case 1:
438 		transfer_pim_to_trap_frame(regs);
439 		break;
440 
441 	default:
442 		/* Fall through */
443 		break;
444 
445 	}
446 
447 	{
448 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
449 		struct unwind_frame_info info;
450 		unwind_frame_init(&info, current, regs);
451 		do_show_stack(&info);
452 	}
453 
454 	printk("\n");
455 	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
456 			msg, code, regs, offset);
457 	show_regs(regs);
458 
459 	spin_unlock(&terminate_lock);
460 
461 	/* put soft power button back under hardware control;
462 	 * if the user had pressed it once at any time, the
463 	 * system will shut down immediately right here. */
464 	pdc_soft_power_button(0);
465 
466 	/* Call kernel panic() so reboot timeouts work properly
467 	 * FIXME: This function should be on the list of
468 	 * panic notifiers, and we should call panic
469 	 * directly from the location that we wish.
470 	 * e.g. We should not call panic from
471 	 * parisc_terminate, but rather the other way around.
472 	 * This hack works, prints the panic message twice,
473 	 * and it enables reboot timers!
474 	 */
475 	panic(msg);
476 }
477 
478 void handle_interruption(int code, struct pt_regs *regs)
479 {
480 	unsigned long fault_address = 0;
481 	unsigned long fault_space = 0;
482 	struct siginfo si;
483 
484 	if (code == 1)
485 	    pdc_console_restart();  /* switch back to pdc if HPMC */
486 	else
487 	    local_irq_enable();
488 
489 	/* Security check:
490 	 * If the priority level is still user, and the
491 	 * faulting space is not equal to the active space
492 	 * then the user is attempting something in a space
493 	 * that does not belong to them. Kill the process.
494 	 *
495 	 * This is normally the situation when the user
496 	 * attempts to jump into the kernel space at the
497 	 * wrong offset, be it at the gateway page or a
498 	 * random location.
499 	 *
500 	 * We cannot normally signal the process because it
501 	 * could *be* on the gateway page, and processes
502 	 * executing on the gateway page can't have signals
503 	 * delivered.
504 	 *
505 	 * We merely readjust the address into the user's
506 	 * space, at a destination address of zero, and
507 	 * allow processing to continue.
508 	 */
509 	if (((unsigned long)regs->iaoq[0] & 3) &&
510 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
511 	  	/* Kill the user process later */
512 	  	regs->iaoq[0] = 0 | 3;
513 		regs->iaoq[1] = regs->iaoq[0] + 4;
514 		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
515 		regs->gr[0] &= ~PSW_B;
516 		return;
517 	}
518 
519 #if 0
520 	printk(KERN_CRIT "Interruption # %d\n", code);
521 #endif
522 
523 	switch(code) {
524 
525 	case  1:
526 		/* High-priority machine check (HPMC) */
527 
528 		/* set up a new led state on systems shipped with a LED State panel */
529 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
530 
531 		parisc_terminate("High Priority Machine Check (HPMC)",
532 				 regs, code, 0);
533 		/* NOT REACHED */
534 
535 	case  2:
536 		/* Power failure interrupt */
537 		printk(KERN_CRIT "Power failure interrupt !\n");
538 		return;
539 
540 	case  3:
541 		/* Recovery counter trap */
542 		regs->gr[0] &= ~PSW_R;
543 		if (user_space(regs))
544 			handle_gdb_break(regs, TRAP_TRACE);
545 		/* else this must be the start of a syscall - just let it run */
546 		return;
547 
548 	case  5:
549 		/* Low-priority machine check */
550 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
551 
552 		flush_all_caches();
553 		cpu_lpmc(5, regs);
554 		return;
555 
556 	case  6:
557 		/* Instruction TLB miss fault/Instruction page fault */
558 		fault_address = regs->iaoq[0];
559 		fault_space   = regs->iasq[0];
560 		break;
561 
562 	case  8:
563 		/* Illegal instruction trap */
564 		die_if_kernel("Illegal instruction", regs, code);
565 		si.si_code = ILL_ILLOPC;
566 		goto give_sigill;
567 
568 	case  9:
569 		/* Break instruction trap */
570 		handle_break(regs->iir,regs);
571 		return;
572 
573 	case 10:
574 		/* Privileged operation trap */
575 		die_if_kernel("Privileged operation", regs, code);
576 		si.si_code = ILL_PRVOPC;
577 		goto give_sigill;
578 
579 	case 11:
580 		/* Privileged register trap */
581 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
582 
583 			/* This is a MFCTL cr26/cr27 to gr instruction.
584 			 * PCXS traps on this, so we need to emulate it.
585 			 */
586 
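			/* Bit 0x00200000 of the IIR selects cr27 over cr26,
			 * and the low five bits name the destination GR.
			 * Afterwards, advance the IAOQ/IASQ queues so the
			 * emulated instruction is skipped on return. */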
587 			if (regs->iir & 0x00200000)
588 				regs->gr[regs->iir & 0x1f] = mfctl(27);
589 			else
590 				regs->gr[regs->iir & 0x1f] = mfctl(26);
591 
592 			regs->iaoq[0] = regs->iaoq[1];
593 			regs->iaoq[1] += 4;
594 			regs->iasq[0] = regs->iasq[1];
595 			return;
596 		}
597 
598 		die_if_kernel("Privileged register usage", regs, code);
599 		si.si_code = ILL_PRVREG;
600 	give_sigill:
601 		si.si_signo = SIGILL;
602 		si.si_errno = 0;
603 		si.si_addr = (void __user *) regs->iaoq[0];
604 		force_sig_info(SIGILL, &si, current);
605 		return;
606 
607 	case 12:
608 		/* Overflow Trap, let the userland signal handler do the cleanup */
609 		si.si_signo = SIGFPE;
610 		si.si_code = FPE_INTOVF;
611 		si.si_addr = (void __user *) regs->iaoq[0];
612 		force_sig_info(SIGFPE, &si, current);
613 		return;
614 
615 	case 13:
616 		/* Conditional Trap
617 		   The condition succeeds in an instruction which traps
618 		   on condition  */
619 		if(user_mode(regs)){
620 			si.si_signo = SIGFPE;
621 			/* Set to zero, and let the userspace app figure it out from
622 		   	   the insn pointed to by si_addr */
623 			si.si_code = 0;
624 			si.si_addr = (void __user *) regs->iaoq[0];
625 			force_sig_info(SIGFPE, &si, current);
626 			return;
627 		}
628 		/* The kernel doesn't want to handle condition codes */
629 		break;
630 
631 	case 14:
632 		/* Assist Exception Trap, i.e. floating point exception. */
633 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
634 		handle_fpe(regs);
635 		return;
636 
637 	case 15:
638 		/* Data TLB miss fault/Data page fault */
639 		/* Fall through */
640 	case 16:
641 		/* Non-access instruction TLB miss fault */
642 		/* The instruction TLB entry needed for the target address of the FIC
643 		   is absent, and hardware can't find it, so we get to clean up */
644 		/* Fall through */
645 	case 17:
646 		/* Non-access data TLB miss fault/Non-access data page fault */
647 		/* FIXME:
648 			 Still need to add slow path emulation code here!
649 			 If the insn used a non-shadow register, then the tlb
650 			 handlers could not have their side-effect (e.g. probe
651 			 writing to a target register) emulated since rfir would
652 			 erase the changes to said register. Instead we have to
653 			 set up everything, call this function we are in, and emulate
654 			 by hand. Technically we need to emulate:
655 			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw
656 		*/
657 		fault_address = regs->ior;
658 		fault_space = regs->isr;
659 		break;
660 
661 	case 18:
662 		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
663 		/* Check for unaligned access */
664 		if (check_unaligned(regs)) {
665 			handle_unaligned(regs);
666 			return;
667 		}
668 		/* Fall Through */
669 	case 26:
670 		/* PCXL: Data memory access rights trap */
671 		fault_address = regs->ior;
672 		fault_space   = regs->isr;
673 		break;
674 
675 	case 19:
676 		/* Data memory break trap */
677 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
678 		/* fall thru */
679 	case 21:
680 		/* Page reference trap */
681 		handle_gdb_break(regs, TRAP_HWBKPT);
682 		return;
683 
684 	case 25:
685 		/* Taken branch trap */
686 		regs->gr[0] &= ~PSW_T;
687 		if (user_space(regs))
688 			handle_gdb_break(regs, TRAP_BRANCH);
689 		/* else this must be the start of a syscall - just let it
690 		 * run.
691 		 */
692 		return;
693 
694 	case  7:
695 		/* Instruction access rights */
696 		/* PCXL: Instruction memory protection trap */
697 
698 		/*
699 		 * This could be caused by either: 1) a process attempting
700 		 * to execute within a vma that does not have execute
701 		 * permission, or 2) an access rights violation caused by a
702 		 * flush only translation set up by ptep_get_and_clear().
703 		 * So we check the vma permissions to differentiate the two.
704 		 * If the vma indicates we have execute permission, then
705 		 * the cause is the latter one. In this case, we need to
706 		 * call do_page_fault() to fix the problem.
707 		 */
708 
709 		if (user_mode(regs)) {
710 			struct vm_area_struct *vma;
711 
712 			down_read(&current->mm->mmap_sem);
713 			vma = find_vma(current->mm,regs->iaoq[0]);
714 			if (vma && (regs->iaoq[0] >= vma->vm_start)
715 				&& (vma->vm_flags & VM_EXEC)) {
716 
717 				fault_address = regs->iaoq[0];
718 				fault_space = regs->iasq[0];
719 
720 				up_read(&current->mm->mmap_sem);
721 				break; /* call do_page_fault() */
722 			}
723 			up_read(&current->mm->mmap_sem);
724 		}
725 		/* Fall Through */
726 	case 27:
727 		/* Data memory protection ID trap */
728 		die_if_kernel("Protection id trap", regs, code);
729 		si.si_code = SEGV_MAPERR;
730 		si.si_signo = SIGSEGV;
731 		si.si_errno = 0;
732 		if (code == 7)
733 		    si.si_addr = (void __user *) regs->iaoq[0];
734 		else
735 		    si.si_addr = (void __user *) regs->ior;
736 		force_sig_info(SIGSEGV, &si, current);
737 		return;
738 
739 	case 28:
740 		/* Unaligned data reference trap */
741 		handle_unaligned(regs);
742 		return;
743 
744 	default:
745 		if (user_mode(regs)) {
746 #ifdef PRINT_USER_FAULTS
747 			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
748 			    current->pid, current->comm);
749 			show_regs(regs);
750 #endif
751 			/* SIGBUS, for lack of a better one. */
752 			si.si_signo = SIGBUS;
753 			si.si_code = BUS_OBJERR;
754 			si.si_errno = 0;
755 			si.si_addr = (void __user *) regs->ior;
756 			force_sig_info(SIGBUS, &si, current);
757 			return;
758 		}
759 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
760 
761 		parisc_terminate("Unexpected interruption", regs, code, 0);
762 		/* NOT REACHED */
763 	}
764 
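	/* The remaining cases are memory faults: a user fault outside the
	 * process' own space gets a SIGSEGV, a kernel fault on kernel space
	 * is fatal, and everything else is handed to do_page_fault(). */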
765 	if (user_mode(regs)) {
766 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
767 #ifdef PRINT_USER_FAULTS
768 		if (fault_space == 0)
769 			printk(KERN_DEBUG "User Fault on Kernel Space ");
770 		else
771 			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
772 			       code);
773 		printk("pid=%d command='%s'\n", current->pid, current->comm);
774 		show_regs(regs);
775 #endif
776 		si.si_signo = SIGSEGV;
777 		si.si_errno = 0;
778 		si.si_code = SEGV_MAPERR;
779 		si.si_addr = (void __user *) regs->ior;
780 		force_sig_info(SIGSEGV, &si, current);
781 		return;
782 	    }
783 	}
784 	else {
785 
786 	    /*
787 	     * The kernel should never fault on its own address space.
788 	     */
789 
790 	    if (fault_space == 0)
791 	    {
792 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
793 		parisc_terminate("Kernel Fault", regs, code, fault_address);
794 
795 	    }
796 	}
797 
798 	do_page_fault(regs, code, fault_address);
799 }
800 
801 
802 int __init check_ivt(void *iva)
803 {
804 	int i;
805 	u32 check = 0;
806 	u32 *ivap;
807 	u32 *hpmcp;
808 	u32 length;
809 	extern void os_hpmc(void);
810 	extern void os_hpmc_end(void);
811 
812 	if (strcmp((char *)iva, "cows can fly"))
813 		return -1;
814 
815 	ivap = (u32 *)iva;
816 
817 	for (i = 0; i < 8; i++)
818 	    *ivap++ = 0;
819 
820 	/* Compute Checksum for HPMC handler */
821 
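	/* ivap now points 32 bytes past the IVA, i.e. at the HPMC vector
	 * entry: word 7 gets the handler length and word 5 the negated
	 * checksum, so that the handler text plus the entry sums to zero. */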
822 	length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
823 	ivap[7] = length;
824 
825 	hpmcp = (u32 *)os_hpmc;
826 
827 	for (i=0; i<length/4; i++)
828 	    check += *hpmcp++;
829 
830 	for (i=0; i<8; i++)
831 	    check += ivap[i];
832 
833 	ivap[5] = -check;
834 
835 	return 0;
836 }
837 
838 #ifndef __LP64__
839 extern const void fault_vector_11;
840 #endif
841 extern const void fault_vector_20;
842 
843 void __init trap_init(void)
844 {
845 	void *iva;
846 
847 	if (boot_cpu_data.cpu_type >= pcxu)
848 		iva = (void *) &fault_vector_20;
849 	else
850 #ifdef __LP64__
851 		panic("Can't boot 64-bit OS on PA1.1 processor!");
852 #else
853 		iva = (void *) &fault_vector_11;
854 #endif
855 
856 	if (check_ivt(iva))
857 		panic("IVT invalid");
858 }
859