xref: /linux/arch/parisc/kernel/traps.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  *  linux/arch/parisc/traps.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
6  */
7 
8 /*
9  * 'Traps.c' handles hardware traps and faults after we have saved some
10  * state in 'asm.s'.
11  */
12 
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/timer.h>
19 #include <linux/delay.h>
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/spinlock.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/console.h>
28 #include <linux/kallsyms.h>
29 #include <linux/bug.h>
30 
31 #include <asm/assembly.h>
32 #include <asm/system.h>
33 #include <asm/uaccess.h>
34 #include <asm/io.h>
35 #include <asm/irq.h>
36 #include <asm/traps.h>
37 #include <asm/unaligned.h>
38 #include <asm/atomic.h>
39 #include <asm/smp.h>
40 #include <asm/pdc.h>
41 #include <asm/pdc_chassis.h>
42 #include <asm/unwind.h>
43 #include <asm/tlbflush.h>
44 #include <asm/cacheflush.h>
45 
46 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
47 
48 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
49 			  /*  dumped to the console via printk)          */
50 
51 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
52 DEFINE_SPINLOCK(pa_dbit_lock);
53 #endif
54 
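/* Render the low 'nbits' bits of x into buf as '0'/'1' characters,
 * most significant bit first; buf must hold at least nbits+1 bytes. */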
55 static int printbinary(char *buf, unsigned long x, int nbits)
56 {
57 	unsigned long mask = 1UL << (nbits - 1);
58 	while (mask != 0) {
59 		*buf++ = (mask & x ? '1' : '0');
60 		mask >>= 1;
61 	}
62 	*buf = '\0';
63 
64 	return nbits;
65 }
66 
67 #ifdef CONFIG_64BIT
68 #define RFMT "%016lx"
69 #else
70 #define RFMT "%08lx"
71 #endif
72 #define FFMT "%016llx"	/* fpregs are 64-bit always */
73 
74 #define PRINTREGS(lvl,r,f,fmt,x)	\
75 	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
76 		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
77 		(r)[(x)+2], (r)[(x)+3])
78 
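/* Print the PSW (with its bit legend) and all 32 general registers,
 * four per line, at the given log level. */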
79 static void print_gr(char *level, struct pt_regs *regs)
80 {
81 	int i;
82 	char buf[64];
83 
84 	printk("%s\n", level);
85 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
86 	printbinary(buf, regs->gr[0], 32);
87 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
88 
89 	for (i = 0; i < 32; i += 4)
90 		PRINTREGS(level, regs->gr, "r", RFMT, i);
91 }
92 
93 static void print_fr(char *level, struct pt_regs *regs)
94 {
95 	int i;
96 	char buf[64];
97 	struct { u32 sw[2]; } s;
98 
99 	/* FR are 64bit everywhere. Need to use asm to get the content
100 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
101 	 * in our way, otherwise we're screwed.
102 	 * The fldd is used to restore the T-bit if there was one, as the
103 	 * store clears it anyway.
104 	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
105 	asm volatile ("fstd %%fr0,0(%1)	\n\t"
106 		      "fldd 0(%1),%%fr0	\n\t"
107 		      : "=m" (s) : "r" (&s) : "r0");
108 
109 	printk("%s\n", level);
110 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
111 	printbinary(buf, s.sw[0], 32);
112 	printk("%sFPSR: %s\n", level, buf);
113 	printk("%sFPER1: %08x\n", level, s.sw[1]);
114 
115 	/* here we'll print fr0 again, tho it'll be meaningless */
116 	for (i = 0; i < 32; i += 4)
117 		PRINTREGS(level, regs->fr, "fr", FFMT, i);
118 }
119 
120 void show_regs(struct pt_regs *regs)
121 {
122 	int i;
123 	char *level;
124 	unsigned long cr30, cr31;
125 
126 	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
127 
128 	print_gr(level, regs);
129 
130 	for (i = 0; i < 8; i += 4)
131 		PRINTREGS(level, regs->sr, "sr", RFMT, i);
132 
133 	if (user_mode(regs))
134 		print_fr(level, regs);
135 
136 	cr30 = mfctl(30);
137 	cr31 = mfctl(31);
138 	printk("%s\n", level);
139 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
140 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
141 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
142 	       level, regs->iir, regs->isr, regs->ior);
143 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
144 	       level, current_thread_info()->cpu, cr30, cr31);
145 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
146 	printk(level);
147 	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
148 	printk(level);
149 	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
150 	printk(level);
151 	print_symbol(" RP(r2): %s\n", regs->gr[2]);
152 }
153 
154 
155 void dump_stack(void)
156 {
157 	show_stack(NULL, NULL);
158 }
159 
160 EXPORT_SYMBOL(dump_stack);
161 
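/* Walk up to 16 unwind frames, printing only addresses that resolve
 * to kernel text. */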
162 static void do_show_stack(struct unwind_frame_info *info)
163 {
164 	int i = 1;
165 
166 	printk(KERN_CRIT "Backtrace:\n");
167 	while (i <= 16) {
168 		if (unwind_once(info) < 0 || info->ip == 0)
169 			break;
170 
171 		if (__kernel_text_address(info->ip)) {
172 			printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
173 #ifdef CONFIG_KALLSYMS
174 			print_symbol("%s\n", info->ip);
175 #else
176 			if ((i & 0x03) == 0)
177 				printk("\n");
178 #endif
179 			i++;
180 		}
181 	}
182 	printk("\n");
183 }
184 
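/* Backtrace either the current context (task == NULL, using a frame
 * built from the live stack pointer) or a blocked task's saved state. */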
185 void show_stack(struct task_struct *task, unsigned long *s)
186 {
187 	struct unwind_frame_info info;
188 
189 	if (!task) {
190 		unsigned long sp;
191 
192 HERE:
193 		asm volatile ("copy %%r30, %0" : "=r"(sp));
194 		{
195 			struct pt_regs r;
196 
197 			memset(&r, 0, sizeof(struct pt_regs));
198 			r.iaoq[0] = (unsigned long)&&HERE;
199 			r.gr[2] = (unsigned long)__builtin_return_address(0);
200 			r.gr[30] = sp;
201 
202 			unwind_frame_init(&info, current, &r);
203 		}
204 	} else {
205 		unwind_frame_init_from_blocked_task(&info, task);
206 	}
207 
208 	do_show_stack(&info);
209 }
210 
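/* Any kernel address may hold a BUG() break; report_bug() does the
 * actual bug-table lookup. */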
211 int is_valid_bugaddr(unsigned long iaoq)
212 {
213 	return 1;
214 }
215 
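/* For user mode, log the fault (and optionally the registers) and return;
 * for kernel mode, print the oops banner, registers and backtrace, then
 * panic or kill the current task. */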
216 void die_if_kernel(char *str, struct pt_regs *regs, long err)
217 {
218 	if (user_mode(regs)) {
219 		if (err == 0)
220 			return; /* STFU */
221 
222 		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
223 			current->comm, current->pid, str, err, regs->iaoq[0]);
224 #ifdef PRINT_USER_FAULTS
225 		/* XXX for debugging only */
226 		show_regs(regs);
227 #endif
228 		return;
229 	}
230 
231 	oops_in_progress = 1;
232 
233 	/* Amuse the user in a SPARC fashion */
234 	if (err) printk(
235 KERN_CRIT "      _______________________________ \n"
236 KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
237 KERN_CRIT "      ------------------------------- \n"
238 KERN_CRIT "             \\   ^__^\n"
239 KERN_CRIT "              \\  (xx)\\_______\n"
240 KERN_CRIT "                 (__)\\       )\\/\\\n"
241 KERN_CRIT "                  U  ||----w |\n"
242 KERN_CRIT "                     ||     ||\n");
243 
244 	/* unlock the pdc lock if necessary */
245 	pdc_emergency_unlock();
246 
247 	/* maybe the kernel hasn't booted very far yet and hasn't been able
248 	 * to initialize the serial or STI console. In that case we should
249 	 * re-enable the pdc console, so that the user will be able to
250 	 * identify the problem. */
251 	if (!console_drivers)
252 		pdc_console_restart();
253 
254 	if (err)
255 		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
256 			current->comm, current->pid, str, err);
257 
258 	/* Wot's wrong wif bein' racy? */
259 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
260 		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
261 		local_irq_enable();
262 		while (1);
263 	}
264 	current->thread.flags |= PARISC_KERNEL_DEATH;
265 
266 	show_regs(regs);
267 	dump_stack();
268 
269 	if (in_interrupt())
270 		panic("Fatal exception in interrupt");
271 
272 	if (panic_on_oops) {
273 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
274 		ssleep(5);
275 		panic("Fatal exception");
276 	}
277 
278 	do_exit(SIGSEGV);
279 }
280 
281 int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
282 {
283 	return syscall(regs);
284 }
285 
286 /* gdb uses break 4,8 */
287 #define GDB_BREAK_INSN 0x10004
288 static void handle_gdb_break(struct pt_regs *regs, int wot)
289 {
290 	struct siginfo si;
291 
292 	si.si_signo = SIGTRAP;
293 	si.si_errno = 0;
294 	si.si_code = wot;
295 	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
296 	force_sig_info(SIGTRAP, &si, current);
297 }
298 
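/* Trap 9: decode the break instruction from the IIR.  Kernel-mode
 * BUG()/WARN() breaks go through report_bug(); everything else is
 * reported to the task as SIGTRAP. */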
299 static void handle_break(struct pt_regs *regs)
300 {
301 	unsigned iir = regs->iir;
302 
303 	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
304 		/* check if a BUG() or WARN() trapped here.  */
305 		enum bug_trap_type tt;
306 		tt = report_bug(regs->iaoq[0] & ~3);
307 		if (tt == BUG_TRAP_TYPE_WARN) {
308 			regs->iaoq[0] += 4;
309 			regs->iaoq[1] += 4;
310 			return; /* return to next instruction when WARN_ON().  */
311 		}
312 		die_if_kernel("Unknown kernel breakpoint", regs,
313 			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
314 	}
315 
316 #ifdef PRINT_USER_FAULTS
317 	if (unlikely(iir != GDB_BREAK_INSN)) {
318 		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
319 			iir & 31, (iir>>13) & ((1<<13)-1),
320 			current->pid, current->comm);
321 		show_regs(regs);
322 	}
323 #endif
324 
325 	/* send standard GDB signal */
326 	handle_gdb_break(regs, TRAP_BRKPT);
327 }
328 
329 static void default_trap(int code, struct pt_regs *regs)
330 {
331 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
332 	show_regs(regs);
333 }
334 
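/* Handler invoked for low-priority machine checks (trap 5);
 * defaults to dumping the trap number and registers. */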
335 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
336 
337 
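/* Rebuild a struct pt_regs from the HPMC PIM dump in hpmc_pim_data,
 * using the wide (PA2.0) or narrow (PA1.1) layout depending on the
 * CPU type. */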
338 void transfer_pim_to_trap_frame(struct pt_regs *regs)
339 {
340     register int i;
341     extern unsigned int hpmc_pim_data[];
342     struct pdc_hpmc_pim_11 *pim_narrow;
343     struct pdc_hpmc_pim_20 *pim_wide;
344 
345     if (boot_cpu_data.cpu_type >= pcxu) {
346 
347 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
348 
349 	/*
350 	 * Note: The following code will probably generate a
351 	 * bunch of truncation error warnings from the compiler.
352 	 * Could be handled with an ifdef, but perhaps there
353 	 * is a better way.
354 	 */
355 
356 	regs->gr[0] = pim_wide->cr[22];
357 
358 	for (i = 1; i < 32; i++)
359 	    regs->gr[i] = pim_wide->gr[i];
360 
361 	for (i = 0; i < 32; i++)
362 	    regs->fr[i] = pim_wide->fr[i];
363 
364 	for (i = 0; i < 8; i++)
365 	    regs->sr[i] = pim_wide->sr[i];
366 
367 	regs->iasq[0] = pim_wide->cr[17];
368 	regs->iasq[1] = pim_wide->iasq_back;
369 	regs->iaoq[0] = pim_wide->cr[18];
370 	regs->iaoq[1] = pim_wide->iaoq_back;
371 
372 	regs->sar  = pim_wide->cr[11];
373 	regs->iir  = pim_wide->cr[19];
374 	regs->isr  = pim_wide->cr[20];
375 	regs->ior  = pim_wide->cr[21];
376     }
377     else {
378 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
379 
380 	regs->gr[0] = pim_narrow->cr[22];
381 
382 	for (i = 1; i < 32; i++)
383 	    regs->gr[i] = pim_narrow->gr[i];
384 
385 	for (i = 0; i < 32; i++)
386 	    regs->fr[i] = pim_narrow->fr[i];
387 
388 	for (i = 0; i < 8; i++)
389 	    regs->sr[i] = pim_narrow->sr[i];
390 
391 	regs->iasq[0] = pim_narrow->cr[17];
392 	regs->iasq[1] = pim_narrow->iasq_back;
393 	regs->iaoq[0] = pim_narrow->cr[18];
394 	regs->iaoq[1] = pim_narrow->iaoq_back;
395 
396 	regs->sar  = pim_narrow->cr[11];
397 	regs->iir  = pim_narrow->cr[19];
398 	regs->isr  = pim_narrow->cr[20];
399 	regs->ior  = pim_narrow->cr[21];
400     }
401 
402     /*
403      * The following fields only have meaning if we came through
404      * another path. So just zero them here.
405      */
406 
407     regs->ksp = 0;
408     regs->kpc = 0;
409     regs->orig_r28 = 0;
410 }
411 
412 
413 /*
414  * This routine is called as a last resort when everything else
415  * has gone clearly wrong. We get called for faults in kernel space,
416  * and HPMCs.
417  */
418 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
419 {
420 	static DEFINE_SPINLOCK(terminate_lock);
421 
422 	oops_in_progress = 1;
423 
424 	set_eiem(0);
425 	local_irq_disable();
426 	spin_lock(&terminate_lock);
427 
428 	/* unlock the pdc lock if necessary */
429 	pdc_emergency_unlock();
430 
431 	/* restart pdc console if necessary */
432 	if (!console_drivers)
433 		pdc_console_restart();
434 
435 	/* Not all paths will gutter the processor... */
436 	switch(code){
437 
438 	case 1:
439 		transfer_pim_to_trap_frame(regs);
440 		break;
441 
442 	default:
443 		/* Fall through */
444 		break;
445 
446 	}
447 
448 	{
449 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
450 		struct unwind_frame_info info;
451 		unwind_frame_init(&info, current, regs);
452 		do_show_stack(&info);
453 	}
454 
455 	printk("\n");
456 	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
457 			msg, code, regs, offset);
458 	show_regs(regs);
459 
460 	spin_unlock(&terminate_lock);
461 
462 	/* put soft power button back under hardware control;
463 	 * if the user had pressed it once at any time, the
464 	 * system will shut down immediately right here. */
465 	pdc_soft_power_button(0);
466 
467 	/* Call kernel panic() so reboot timeouts work properly
468 	 * FIXME: This function should be on the list of
469 	 * panic notifiers, and we should call panic
470 	 * directly from the location that we wish.
471 	 * e.g. We should not call panic from
472 	 * parisc_terminate, but rather the other way around.
473 	 * This hack works, prints the panic message twice,
474 	 * and it enables reboot timers!
475 	 */
476 	panic(msg);
477 }
478 
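/* Main C entry point for interruptions: 'code' is the trap number from
 * the interruption vector table, 'regs' the frame saved by the
 * low-level handlers. */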
479 void handle_interruption(int code, struct pt_regs *regs)
480 {
481 	unsigned long fault_address = 0;
482 	unsigned long fault_space = 0;
483 	struct siginfo si;
484 
485 	if (code == 1)
486 	    pdc_console_restart();  /* switch back to pdc if HPMC */
487 	else
488 	    local_irq_enable();
489 
490 	/* Security check:
491 	 * If the priority level is still user, and the
492 	 * faulting space is not equal to the active space
493 	 * then the user is attempting something in a space
494 	 * that does not belong to them. Kill the process.
495 	 *
496 	 * This is normally the situation when the user
497 	 * attempts to jump into the kernel space at the
498 	 * wrong offset, be it at the gateway page or a
499 	 * random location.
500 	 *
501 	 * We cannot normally signal the process because it
502 	 * could *be* on the gateway page, and processes
503 	 * executing on the gateway page can't have signals
504 	 * delivered.
505 	 *
506 	 * We merely readjust the address into the user's
507 	 * space, at a destination address of zero, and
508 	 * allow processing to continue.
509 	 */
510 	if (((unsigned long)regs->iaoq[0] & 3) &&
511 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
512 		/* Kill the user process later */
513 		regs->iaoq[0] = 0 | 3;
514 		regs->iaoq[1] = regs->iaoq[0] + 4;
515 		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
516 		regs->gr[0] &= ~PSW_B;
517 		return;
518 	}
519 
520 #if 0
521 	printk(KERN_CRIT "Interruption # %d\n", code);
522 #endif
523 
524 	switch(code) {
525 
526 	case  1:
527 		/* High-priority machine check (HPMC) */
528 
529 		/* set up a new led state on systems shipped with a LED State panel */
530 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
531 
532 	    	parisc_terminate("High Priority Machine Check (HPMC)",
533 				regs, code, 0);
534 		/* NOT REACHED */
535 
536 	case  2:
537 		/* Power failure interrupt */
538 		printk(KERN_CRIT "Power failure interrupt !\n");
539 		return;
540 
541 	case  3:
542 		/* Recovery counter trap */
543 		regs->gr[0] &= ~PSW_R;
544 		if (user_space(regs))
545 			handle_gdb_break(regs, TRAP_TRACE);
546 		/* else this must be the start of a syscall - just let it run */
547 		return;
548 
549 	case  5:
550 		/* Low-priority machine check */
551 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
552 
553 		flush_cache_all();
554 		flush_tlb_all();
555 		cpu_lpmc(5, regs);
556 		return;
557 
558 	case  6:
559 		/* Instruction TLB miss fault/Instruction page fault */
560 		fault_address = regs->iaoq[0];
561 		fault_space   = regs->iasq[0];
562 		break;
563 
564 	case  8:
565 		/* Illegal instruction trap */
566 		die_if_kernel("Illegal instruction", regs, code);
567 		si.si_code = ILL_ILLOPC;
568 		goto give_sigill;
569 
570 	case  9:
571 		/* Break instruction trap */
572 		handle_break(regs);
573 		return;
574 
575 	case 10:
576 		/* Privileged operation trap */
577 		die_if_kernel("Privileged operation", regs, code);
578 		si.si_code = ILL_PRVOPC;
579 		goto give_sigill;
580 
581 	case 11:
582 		/* Privileged register trap */
583 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
584 
585 			/* This is a MFCTL cr26/cr27 to gr instruction.
586 			 * PCXS traps on this, so we need to emulate it.
587 			 */
588 
589 			if (regs->iir & 0x00200000)
590 				regs->gr[regs->iir & 0x1f] = mfctl(27);
591 			else
592 				regs->gr[regs->iir & 0x1f] = mfctl(26);
593 
594 			regs->iaoq[0] = regs->iaoq[1];
595 			regs->iaoq[1] += 4;
596 			regs->iasq[0] = regs->iasq[1];
597 			return;
598 		}
599 
600 		die_if_kernel("Privileged register usage", regs, code);
601 		si.si_code = ILL_PRVREG;
602 	give_sigill:
603 		si.si_signo = SIGILL;
604 		si.si_errno = 0;
605 		si.si_addr = (void __user *) regs->iaoq[0];
606 		force_sig_info(SIGILL, &si, current);
607 		return;
608 
609 	case 12:
610 		/* Overflow Trap, let the userland signal handler do the cleanup */
611 		si.si_signo = SIGFPE;
612 		si.si_code = FPE_INTOVF;
613 		si.si_addr = (void __user *) regs->iaoq[0];
614 		force_sig_info(SIGFPE, &si, current);
615 		return;
616 
617 	case 13:
618 		/* Conditional Trap
619 		   The condition succeeds in an instruction which traps
620 		   on condition  */
621 		if(user_mode(regs)){
622 			si.si_signo = SIGFPE;
623 			/* Set to zero, and let the userspace app figure it out from
624 		   	   the insn pointed to by si_addr */
625 			si.si_code = 0;
626 			si.si_addr = (void __user *) regs->iaoq[0];
627 			force_sig_info(SIGFPE, &si, current);
628 			return;
629 		}
630 		/* The kernel doesn't want to handle condition codes */
631 		break;
632 
633 	case 14:
634 		/* Assist Exception Trap, i.e. floating point exception. */
635 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
636 		handle_fpe(regs);
637 		return;
638 
639 	case 15:
640 		/* Data TLB miss fault/Data page fault */
641 		/* Fall through */
642 	case 16:
643 		/* Non-access instruction TLB miss fault */
644 		/* The instruction TLB entry needed for the target address of the FIC
645 		   is absent, and hardware can't find it, so we get to clean up */
646 		/* Fall through */
647 	case 17:
648 		/* Non-access data TLB miss fault/Non-access data page fault */
649 		/* FIXME:
650 		   Still need to add slow path emulation code here!
651 		   If the insn used a non-shadow register, then the tlb
652 		   handlers could not have their side-effect (e.g. probe
653 		   writing to a target register) emulated since rfir would
654 		   erase the changes to said register. Instead we have to
655 		   set up everything, call this function we are in, and
656 		   emulate by hand. Technically we need to emulate:
657 		   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
658 		*/
659 		fault_address = regs->ior;
660 		fault_space = regs->isr;
661 		break;
662 
663 	case 18:
664 		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
665 		/* Check for unaligned access */
666 		if (check_unaligned(regs)) {
667 			handle_unaligned(regs);
668 			return;
669 		}
670 		/* Fall Through */
671 	case 26:
672 		/* PCXL: Data memory access rights trap */
673 		fault_address = regs->ior;
674 		fault_space   = regs->isr;
675 		break;
676 
677 	case 19:
678 		/* Data memory break trap */
679 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
680 		/* fall thru */
681 	case 21:
682 		/* Page reference trap */
683 		handle_gdb_break(regs, TRAP_HWBKPT);
684 		return;
685 
686 	case 25:
687 		/* Taken branch trap */
688 		regs->gr[0] &= ~PSW_T;
689 		if (user_space(regs))
690 			handle_gdb_break(regs, TRAP_BRANCH);
691 		/* else this must be the start of a syscall - just let it
692 		 * run.
693 		 */
694 		return;
695 
696 	case  7:
697 		/* Instruction access rights */
698 		/* PCXL: Instruction memory protection trap */
699 
700 		/*
701 		 * This could be caused by either: 1) a process attempting
702 		 * to execute within a vma that does not have execute
703 		 * permission, or 2) an access rights violation caused by a
704 		 * flush only translation set up by ptep_get_and_clear().
705 		 * So we check the vma permissions to differentiate the two.
706 		 * If the vma indicates we have execute permission, then
707 		 * the cause is the latter one. In this case, we need to
708 		 * call do_page_fault() to fix the problem.
709 		 */
710 
711 		if (user_mode(regs)) {
712 			struct vm_area_struct *vma;
713 
714 			down_read(&current->mm->mmap_sem);
715 			vma = find_vma(current->mm,regs->iaoq[0]);
716 			if (vma && (regs->iaoq[0] >= vma->vm_start)
717 				&& (vma->vm_flags & VM_EXEC)) {
718 
719 				fault_address = regs->iaoq[0];
720 				fault_space = regs->iasq[0];
721 
722 				up_read(&current->mm->mmap_sem);
723 				break; /* call do_page_fault() */
724 			}
725 			up_read(&current->mm->mmap_sem);
726 		}
727 		/* Fall Through */
728 	case 27:
729 		/* Data memory protection ID trap */
730 		die_if_kernel("Protection id trap", regs, code);
731 		si.si_code = SEGV_MAPERR;
732 		si.si_signo = SIGSEGV;
733 		si.si_errno = 0;
734 		if (code == 7)
735 		    si.si_addr = (void __user *) regs->iaoq[0];
736 		else
737 		    si.si_addr = (void __user *) regs->ior;
738 		force_sig_info(SIGSEGV, &si, current);
739 		return;
740 
741 	case 28:
742 		/* Unaligned data reference trap */
743 		handle_unaligned(regs);
744 		return;
745 
746 	default:
747 		if (user_mode(regs)) {
748 #ifdef PRINT_USER_FAULTS
749 			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
750 			    current->pid, current->comm);
751 			show_regs(regs);
752 #endif
753 			/* SIGBUS, for lack of a better one. */
754 			si.si_signo = SIGBUS;
755 			si.si_code = BUS_OBJERR;
756 			si.si_errno = 0;
757 			si.si_addr = (void __user *) regs->ior;
758 			force_sig_info(SIGBUS, &si, current);
759 			return;
760 		}
761 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
762 
763 		parisc_terminate("Unexpected interruption", regs, code, 0);
764 		/* NOT REACHED */
765 	}
766 
767 	if (user_mode(regs)) {
768 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
769 #ifdef PRINT_USER_FAULTS
770 		if (fault_space == 0)
771 			printk(KERN_DEBUG "User Fault on Kernel Space ");
772 		else
773 			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
774 			       code);
775 		printk("pid=%d command='%s'\n", current->pid, current->comm);
776 		show_regs(regs);
777 #endif
778 		si.si_signo = SIGSEGV;
779 		si.si_errno = 0;
780 		si.si_code = SEGV_MAPERR;
781 		si.si_addr = (void __user *) regs->ior;
782 		force_sig_info(SIGSEGV, &si, current);
783 		return;
784 	    }
785 	}
786 	else {
787 
788 	    /*
789 	     * The kernel should never fault on its own address space.
790 	     */
791 
792 	    if (fault_space == 0)
793 	    {
794 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
795 		parisc_terminate("Kernel Fault", regs, code, fault_address);
796 
797 	    }
798 	}
799 
800 	do_page_fault(regs, code, fault_address);
801 }
802 
803 
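/* Finish setting up the interruption vector table built in assembly:
 * verify the marker string, clear the first eight words, then record
 * the HPMC handler length and a checksum word chosen so that the HPMC
 * vector slot plus the handler sums to zero. */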
804 int __init check_ivt(void *iva)
805 {
806 	int i;
807 	u32 check = 0;
808 	u32 *ivap;
809 	u32 *hpmcp;
810 	u32 length;
811 	extern void os_hpmc(void);
812 	extern void os_hpmc_end(void);
813 
814 	if (strcmp((char *)iva, "cows can fly"))
815 		return -1;
816 
817 	ivap = (u32 *)iva;
818 
819 	for (i = 0; i < 8; i++)
820 	    *ivap++ = 0;
821 
822 	/* Compute Checksum for HPMC handler */
823 
824 	length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
825 	ivap[7] = length;
826 
827 	hpmcp = (u32 *)os_hpmc;
828 
829 	for (i=0; i<length/4; i++)
830 	    check += *hpmcp++;
831 
832 	for (i=0; i<8; i++)
833 	    check += ivap[i];
834 
835 	ivap[5] = -check;
836 
837 	return 0;
838 }
839 
840 #ifndef CONFIG_64BIT
841 extern const void fault_vector_11;
842 #endif
843 extern const void fault_vector_20;
844 
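/* Select the PA1.1 or PA2.0 fault vector for this CPU and let
 * check_ivt() validate and finish it (HPMC length and checksum). */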
845 void __init trap_init(void)
846 {
847 	void *iva;
848 
849 	if (boot_cpu_data.cpu_type >= pcxu)
850 		iva = (void *) &fault_vector_20;
851 	else
852 #ifdef CONFIG_64BIT
853 		panic("Can't boot 64-bit OS on PA1.1 processor!");
854 #else
855 		iva = (void *) &fault_vector_11;
856 #endif
857 
858 	if (check_ivt(iva))
859 		panic("IVT invalid");
860 }
861