xref: /linux/arch/alpha/kernel/traps.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * arch/alpha/kernel/traps.c
3  *
4  * (C) Copyright 1994 Linus Torvalds
5  */
6 
7 /*
8  * This file initializes the trap entry points
9  */
10 
11 #include <linux/jiffies.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/delay.h>
16 #include <linux/module.h>
17 #include <linux/kallsyms.h>
18 #include <linux/ratelimit.h>
19 
20 #include <asm/gentrap.h>
21 #include <asm/uaccess.h>
22 #include <asm/unaligned.h>
23 #include <asm/sysinfo.h>
24 #include <asm/hwrpb.h>
25 #include <asm/mmu_context.h>
26 #include <asm/special_insns.h>
27 
28 #include "proto.h"
29 
30 /* Work-around for some SRMs which mishandle opDEC faults.  */
31 
32 static int opDEC_fix;
33 
34 static void
35 opDEC_check(void)
36 {
37 	__asm__ __volatile__ (
38 	/* Load the address of... */
39 	"	br	$16, 1f\n"
40 	/* A stub instruction fault handler.  Just add 4 to the
41 	   pc and continue.  */
42 	"	ldq	$16, 8($sp)\n"
43 	"	addq	$16, 4, $16\n"
44 	"	stq	$16, 8($sp)\n"
45 	"	call_pal %[rti]\n"
46 	/* Install the instruction fault handler.  */
47 	"1:	lda	$17, 3\n"
48 	"	call_pal %[wrent]\n"
49 	/* With that in place, the fault from the round-to-minf fp
50 	   insn will arrive either at the "lda 4" insn (bad) or one
51 	   past that (good).  This places the correct fixup in %[fix].  */
52 	"	lda %[fix], 0\n"
53 	"	cvttq/svm $f31,$f31\n"
54 	"	lda %[fix], 4"
55 	: [fix] "=r" (opDEC_fix)
56 	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
57 	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
58 
59 	if (opDEC_fix)
60 		printk("opDEC fixup enabled.\n");
61 }
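/* The fixup computed above is consumed in do_entIF() below: on EV4 the
   opDEC path simply does "regs->pc += opDEC_fix" before emulating, so a
   broken SRM costs one extra add per fault and a correct one adds zero.  */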
62 
63 void
64 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
65 {
66 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
67 	       regs->pc, regs->r26, regs->ps, print_tainted());
68 	printk("pc is at %pSR\n", (void *)regs->pc);
69 	printk("ra is at %pSR\n", (void *)regs->r26);
70 	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
71 	       regs->r0, regs->r1, regs->r2);
72 	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
73  	       regs->r3, regs->r4, regs->r5);
74 	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
75 	       regs->r6, regs->r7, regs->r8);
76 
77 	if (r9_15) {
78 		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
79 		       r9_15[9], r9_15[10], r9_15[11]);
80 		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
81 		       r9_15[12], r9_15[13], r9_15[14]);
82 		printk("s6 = %016lx\n", r9_15[15]);
83 	}
84 
85 	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
86 	       regs->r16, regs->r17, regs->r18);
87 	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
88  	       regs->r19, regs->r20, regs->r21);
89  	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
90 	       regs->r22, regs->r23, regs->r24);
91 	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
92 	       regs->r25, regs->r27, regs->r28);
93 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
94 #if 0
95 __halt();
96 #endif
97 }
98 
99 #if 0
100 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
101 			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
102 			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
103 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
104 #endif
105 
106 static void
107 dik_show_code(unsigned int *pc)
108 {
109 	long i;
110 
111 	printk("Code:");
112 	for (i = -6; i < 2; i++) {
113 		unsigned int insn;
114 		if (__get_user(insn, (unsigned int __user *)pc + i))
115 			break;
116 		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
117 	}
118 	printk("\n");
119 }
120 
121 static void
122 dik_show_trace(unsigned long *sp)
123 {
124 	long i = 0;
125 	printk("Trace:\n");
126 	while (0x1ff8 & (unsigned long) sp) {
127 		extern char _stext[], _etext[];
128 		unsigned long tmp = *sp;
129 		sp++;
130 		if (tmp < (unsigned long) &_stext)
131 			continue;
132 		if (tmp >= (unsigned long) &_etext)
133 			continue;
134 		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
135 		if (i++ > 40) {
136 			printk(" ...");
137 			break;
138 		}
139 	}
140 	printk("\n");
141 }
142 
143 static int kstack_depth_to_print = 24;
144 
145 void show_stack(struct task_struct *task, unsigned long *sp)
146 {
147 	unsigned long *stack;
148 	int i;
149 
150 	/*
151 	 * debugging aid: "show_stack(NULL);" prints the
152 	 * backtrace for this CPU.
153 	 */
154 	if (sp == NULL)
155 		sp = (unsigned long *)&sp;
156 
157 	stack = sp;
158 	for (i = 0; i < kstack_depth_to_print; i++) {
159 		if (((long) stack & (THREAD_SIZE-1)) == 0)
160 			break;
161 		if (i && ((i % 4) == 0))
162 			printk("\n       ");
163 		printk("%016lx ", *stack++);
164 	}
165 	printk("\n");
166 	dik_show_trace(sp);
167 }
168 
169 void
170 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
171 {
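	/* PS bit 3 is the current-mode bit: if it is set the trap came
	   from user mode, so there is nothing to die about here.  */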
172 	if (regs->ps & 8)
173 		return;
174 #ifdef CONFIG_SMP
175 	printk("CPU %d ", hard_smp_processor_id());
176 #endif
177 	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
178 	dik_show_regs(regs, r9_15);
179 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
180 	dik_show_trace((unsigned long *)(regs+1));
181 	dik_show_code((unsigned int *)regs->pc);
182 
183 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
184 		printk("die_if_kernel recursion detected.\n");
185 		local_irq_enable();
186 		while (1);
187 	}
188 	do_exit(SIGSEGV);
189 }
190 
191 #ifndef CONFIG_MATHEMU
192 static long dummy_emul(void) { return 0; }
193 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
194   = (void *)dummy_emul;
195 long (*alpha_fp_emul) (unsigned long pc)
196   = (void *)dummy_emul;
197 #else
198 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
199 long alpha_fp_emul (unsigned long pc);
200 #endif
201 
202 asmlinkage void
203 do_entArith(unsigned long summary, unsigned long write_mask,
204 	    struct pt_regs *regs)
205 {
206 	long si_code = FPE_FLTINV;
207 	siginfo_t info;
208 
209 	if (summary & 1) {
210 		/* Software-completion summary bit is set, so try to
211 		   emulate the instruction.  If the processor supports
212 		   precise exceptions, we don't have to search.  */
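		/* Note: amask() returns its operand with the bits for
		   implemented features cleared, so !amask(AMASK_PRECISE_TRAP)
		   means the CPU reports precise arithmetic traps and the
		   faulting insn is simply the one at pc - 4.  */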
213 		if (!amask(AMASK_PRECISE_TRAP))
214 			si_code = alpha_fp_emul(regs->pc - 4);
215 		else
216 			si_code = alpha_fp_emul_imprecise(regs, write_mask);
217 		if (si_code == 0)
218 			return;
219 	}
220 	die_if_kernel("Arithmetic fault", regs, 0, NULL);
221 
222 	info.si_signo = SIGFPE;
223 	info.si_errno = 0;
224 	info.si_code = si_code;
225 	info.si_addr = (void __user *) regs->pc;
226 	send_sig_info(SIGFPE, &info, current);
227 }
228 
229 asmlinkage void
230 do_entIF(unsigned long type, struct pt_regs *regs)
231 {
232 	siginfo_t info;
233 	int signo, code;
234 
235 	if ((regs->ps & ~IPL_MAX) == 0) {
236 		if (type == 1) {
237 			const unsigned int *data
238 			  = (const unsigned int *) regs->pc;
239 			printk("Kernel bug at %s:%d\n",
240 			       (const char *)(data[1] | (long)data[2] << 32),
241 			       data[0]);
242 		}
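		/* The layout read above is what BUG() on alpha lays down:
		   a bugchk call_pal followed by a .long __LINE__ and an
		   8-byte __FILE__ pointer, hence data[0] is the line number
		   and data[1]/data[2] are the two halves of the file-name
		   pointer.  */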
243 #ifdef CONFIG_ALPHA_WTINT
244 		if (type == 4) {
245 			/* If CALL_PAL WTINT is totally unsupported by the
246 			   PALcode, e.g. MILO, "emulate" it by overwriting
247 			   the insn.  */
248 			unsigned int *pinsn
249 			  = (unsigned int *) regs->pc - 1;
250 			if (*pinsn == PAL_wtint) {
251 				*pinsn = 0x47e01400; /* mov 0,$0 */
252 				imb();
253 				regs->r0 = 0;
254 				return;
255 			}
256 		}
257 #endif /* ALPHA_WTINT */
258 		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
259 			      regs, type, NULL);
260 	}
261 
262 	switch (type) {
263 	      case 0: /* breakpoint */
264 		info.si_signo = SIGTRAP;
265 		info.si_errno = 0;
266 		info.si_code = TRAP_BRKPT;
267 		info.si_trapno = 0;
268 		info.si_addr = (void __user *) regs->pc;
269 
270 		if (ptrace_cancel_bpt(current)) {
271 			regs->pc -= 4;	/* make pc point to former bpt */
272 		}
273 
274 		send_sig_info(SIGTRAP, &info, current);
275 		return;
276 
277 	      case 1: /* bugcheck */
278 		info.si_signo = SIGTRAP;
279 		info.si_errno = 0;
280 		info.si_code = __SI_FAULT;
281 		info.si_addr = (void __user *) regs->pc;
282 		info.si_trapno = 0;
283 		send_sig_info(SIGTRAP, &info, current);
284 		return;
285 
286 	      case 2: /* gentrap */
287 		info.si_addr = (void __user *) regs->pc;
288 		info.si_trapno = regs->r16;
289 		switch ((long) regs->r16) {
290 		case GEN_INTOVF:
291 			signo = SIGFPE;
292 			code = FPE_INTOVF;
293 			break;
294 		case GEN_INTDIV:
295 			signo = SIGFPE;
296 			code = FPE_INTDIV;
297 			break;
298 		case GEN_FLTOVF:
299 			signo = SIGFPE;
300 			code = FPE_FLTOVF;
301 			break;
302 		case GEN_FLTDIV:
303 			signo = SIGFPE;
304 			code = FPE_FLTDIV;
305 			break;
306 		case GEN_FLTUND:
307 			signo = SIGFPE;
308 			code = FPE_FLTUND;
309 			break;
310 		case GEN_FLTINV:
311 			signo = SIGFPE;
312 			code = FPE_FLTINV;
313 			break;
314 		case GEN_FLTINE:
315 			signo = SIGFPE;
316 			code = FPE_FLTRES;
317 			break;
318 		case GEN_ROPRAND:
319 			signo = SIGFPE;
320 			code = __SI_FAULT;
321 			break;
322 
323 		case GEN_DECOVF:
324 		case GEN_DECDIV:
325 		case GEN_DECINV:
326 		case GEN_ASSERTERR:
327 		case GEN_NULPTRERR:
328 		case GEN_STKOVF:
329 		case GEN_STRLENERR:
330 		case GEN_SUBSTRERR:
331 		case GEN_RANGERR:
332 		case GEN_SUBRNG:
333 		case GEN_SUBRNG1:
334 		case GEN_SUBRNG2:
335 		case GEN_SUBRNG3:
336 		case GEN_SUBRNG4:
337 		case GEN_SUBRNG5:
338 		case GEN_SUBRNG6:
339 		case GEN_SUBRNG7:
340 		default:
341 			signo = SIGTRAP;
342 			code = __SI_FAULT;
343 			break;
344 		}
345 
346 		info.si_signo = signo;
347 		info.si_errno = 0;
348 		info.si_code = code;
349 		info.si_addr = (void __user *) regs->pc;
350 		send_sig_info(signo, &info, current);
351 		return;
352 
353 	      case 4: /* opDEC */
354 		if (implver() == IMPLVER_EV4) {
355 			long si_code;
356 
357 			/* Some versions of SRM do not handle
358 			   the opDEC properly - they return the PC of the
359 			   opDEC fault, not the instruction after as the
360 			   Alpha architecture requires.  Here we fix it up.
361 			   We do this by intentionally causing an opDEC
362 			   fault during the boot sequence and testing if
363 			   we get the correct PC.  If not, we set a flag
364 			   to correct it every time through.  */
365 			regs->pc += opDEC_fix;
366 
367 			/* EV4 does not implement anything except normal
368 			   rounding.  Everything else will come here as
369 			   an illegal instruction.  Emulate them.  */
370 			si_code = alpha_fp_emul(regs->pc - 4);
371 			if (si_code == 0)
372 				return;
373 			if (si_code > 0) {
374 				info.si_signo = SIGFPE;
375 				info.si_errno = 0;
376 				info.si_code = si_code;
377 				info.si_addr = (void __user *) regs->pc;
378 				send_sig_info(SIGFPE, &info, current);
379 				return;
380 			}
381 		}
382 		break;
383 
384 	      case 3: /* FEN fault */
385 		/* Irritating users can call PAL_clrfen to disable the
386 		   FPU for the process.  The kernel will then trap in
387 		   do_switch_stack and undo_switch_stack when we try
388 		   to save and restore the FP registers.
389 
390 		   Given that GCC by default generates code that uses the
391 		   FP registers, PAL_clrfen is not useful except for DoS
392 		   attacks.  So turn the bleeding FPU back on and be done
393 		   with it.  */
394 		current_thread_info()->pcb.flags |= 1;
395 		__reload_thread(&current_thread_info()->pcb);
396 		return;
397 
398 	      case 5: /* illoc */
399 	      default: /* unexpected instruction-fault type */
400 		      ;
401 	}
402 
403 	info.si_signo = SIGILL;
404 	info.si_errno = 0;
405 	info.si_code = ILL_ILLOPC;
406 	info.si_addr = (void __user *) regs->pc;
407 	send_sig_info(SIGILL, &info, current);
408 }
409 
410 /* There is an ifdef in the PALcode in MILO that enables a
411    "kernel debugging entry point" as an unprivileged call_pal.
412 
413    We don't want to have anything to do with it, but unfortunately
414    several versions of MILO included in distributions have it enabled,
415    and if we don't put something on the entry point we'll oops.  */
416 
417 asmlinkage void
418 do_entDbg(struct pt_regs *regs)
419 {
420 	siginfo_t info;
421 
422 	die_if_kernel("Instruction fault", regs, 0, NULL);
423 
424 	info.si_signo = SIGILL;
425 	info.si_errno = 0;
426 	info.si_code = ILL_ILLOPC;
427 	info.si_addr = (void __user *) regs->pc;
428 	force_sig_info(SIGILL, &info, current);
429 }
430 
431 
432 /*
433  * entUna uses a different register layout so that it stays reasonably simple.  It
434  * needs access to all the integer registers (the kernel doesn't use
435  * fp-regs), and it needs to have them in order for simpler access.
436  *
437  * Due to the non-standard register layout (and because we don't want
438  * to handle floating-point regs), user-mode unaligned accesses are
439  * handled separately by do_entUnaUser below.
440  *
441  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
442  * on a gp-register unaligned load/store, something is _very_ wrong
443  * in the kernel anyway.
444  */
445 struct allregs {
446 	unsigned long regs[32];
447 	unsigned long ps, pc, gp, a0, a1, a2;
448 };
449 
450 struct unaligned_stat {
451 	unsigned long count, va, pc;
452 } unaligned[2];
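/* Slot 0 counts kernel-mode unaligned fixups (do_entUna below),
   slot 1 the user-mode ones handled by do_entUnaUser.  */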
453 
454 
455 /* Macro for exception fixup code to access integer registers.  */
456 #define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
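/* For illustration: una_reg(16) resolves to _regs[35], i.e. the a0 slot
   at the tail of struct allregs, because the PALcode passes the fault
   parameters in $16-$18 and the interrupted values of those registers
   are only available from the trap frame, not from regs[].  Any other
   register, e.g. una_reg(5), is simply _regs[5].  */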
457 
458 
459 asmlinkage void
460 do_entUna(void * va, unsigned long opcode, unsigned long reg,
461 	  struct allregs *regs)
462 {
463 	long error, tmp1, tmp2, tmp3, tmp4;
464 	unsigned long pc = regs->pc - 4;
465 	unsigned long *_regs = regs->regs;
466 	const struct exception_table_entry *fixup;
467 
468 	unaligned[0].count++;
469 	unaligned[0].va = (unsigned long) va;
470 	unaligned[0].pc = pc;
471 
472 	/* We don't want to use the generic get/put unaligned macros as
473 	   we want to trap exceptions.  Only if we actually get an
474 	   exception will we decide whether we should have caught it.  */
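	/* Roughly what each load pair below computes, using ldl as the
	   example: the two ldq_u's fetch the aligned quadwords straddling
	   the longword, extll/extlh move the bytes from each quadword into
	   their proper lanes, and tmp1|tmp2 reassembles the value without
	   ever issuing an unaligned access.  */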
475 
476 	switch (opcode) {
477 	case 0x0c: /* ldwu */
478 		__asm__ __volatile__(
479 		"1:	ldq_u %1,0(%3)\n"
480 		"2:	ldq_u %2,1(%3)\n"
481 		"	extwl %1,%3,%1\n"
482 		"	extwh %2,%3,%2\n"
483 		"3:\n"
484 		".section __ex_table,\"a\"\n"
485 		"	.long 1b - .\n"
486 		"	lda %1,3b-1b(%0)\n"
487 		"	.long 2b - .\n"
488 		"	lda %2,3b-2b(%0)\n"
489 		".previous"
490 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
491 			: "r"(va), "0"(0));
492 		if (error)
493 			goto got_exception;
494 		una_reg(reg) = tmp1|tmp2;
495 		return;
496 
497 	case 0x28: /* ldl */
498 		__asm__ __volatile__(
499 		"1:	ldq_u %1,0(%3)\n"
500 		"2:	ldq_u %2,3(%3)\n"
501 		"	extll %1,%3,%1\n"
502 		"	extlh %2,%3,%2\n"
503 		"3:\n"
504 		".section __ex_table,\"a\"\n"
505 		"	.long 1b - .\n"
506 		"	lda %1,3b-1b(%0)\n"
507 		"	.long 2b - .\n"
508 		"	lda %2,3b-2b(%0)\n"
509 		".previous"
510 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
511 			: "r"(va), "0"(0));
512 		if (error)
513 			goto got_exception;
514 		una_reg(reg) = (int)(tmp1|tmp2);
515 		return;
516 
517 	case 0x29: /* ldq */
518 		__asm__ __volatile__(
519 		"1:	ldq_u %1,0(%3)\n"
520 		"2:	ldq_u %2,7(%3)\n"
521 		"	extql %1,%3,%1\n"
522 		"	extqh %2,%3,%2\n"
523 		"3:\n"
524 		".section __ex_table,\"a\"\n"
525 		"	.long 1b - .\n"
526 		"	lda %1,3b-1b(%0)\n"
527 		"	.long 2b - .\n"
528 		"	lda %2,3b-2b(%0)\n"
529 		".previous"
530 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
531 			: "r"(va), "0"(0));
532 		if (error)
533 			goto got_exception;
534 		una_reg(reg) = tmp1|tmp2;
535 		return;
536 
537 	/* Note that the store sequences do not indicate that they change
538 	   memory because it _should_ be affecting nothing in this context.
539 	   (Otherwise we have other, much larger, problems.)  */
540 	case 0x0d: /* stw */
541 		__asm__ __volatile__(
542 		"1:	ldq_u %2,1(%5)\n"
543 		"2:	ldq_u %1,0(%5)\n"
544 		"	inswh %6,%5,%4\n"
545 		"	inswl %6,%5,%3\n"
546 		"	mskwh %2,%5,%2\n"
547 		"	mskwl %1,%5,%1\n"
548 		"	or %2,%4,%2\n"
549 		"	or %1,%3,%1\n"
550 		"3:	stq_u %2,1(%5)\n"
551 		"4:	stq_u %1,0(%5)\n"
552 		"5:\n"
553 		".section __ex_table,\"a\"\n"
554 		"	.long 1b - .\n"
555 		"	lda %2,5b-1b(%0)\n"
556 		"	.long 2b - .\n"
557 		"	lda %1,5b-2b(%0)\n"
558 		"	.long 3b - .\n"
559 		"	lda $31,5b-3b(%0)\n"
560 		"	.long 4b - .\n"
561 		"	lda $31,5b-4b(%0)\n"
562 		".previous"
563 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
564 			  "=&r"(tmp3), "=&r"(tmp4)
565 			: "r"(va), "r"(una_reg(reg)), "0"(0));
566 		if (error)
567 			goto got_exception;
568 		return;
569 
570 	case 0x2c: /* stl */
571 		__asm__ __volatile__(
572 		"1:	ldq_u %2,3(%5)\n"
573 		"2:	ldq_u %1,0(%5)\n"
574 		"	inslh %6,%5,%4\n"
575 		"	insll %6,%5,%3\n"
576 		"	msklh %2,%5,%2\n"
577 		"	mskll %1,%5,%1\n"
578 		"	or %2,%4,%2\n"
579 		"	or %1,%3,%1\n"
580 		"3:	stq_u %2,3(%5)\n"
581 		"4:	stq_u %1,0(%5)\n"
582 		"5:\n"
583 		".section __ex_table,\"a\"\n"
584 		"	.long 1b - .\n"
585 		"	lda %2,5b-1b(%0)\n"
586 		"	.long 2b - .\n"
587 		"	lda %1,5b-2b(%0)\n"
588 		"	.long 3b - .\n"
589 		"	lda $31,5b-3b(%0)\n"
590 		"	.long 4b - .\n"
591 		"	lda $31,5b-4b(%0)\n"
592 		".previous"
593 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
594 			  "=&r"(tmp3), "=&r"(tmp4)
595 			: "r"(va), "r"(una_reg(reg)), "0"(0));
596 		if (error)
597 			goto got_exception;
598 		return;
599 
600 	case 0x2d: /* stq */
601 		__asm__ __volatile__(
602 		"1:	ldq_u %2,7(%5)\n"
603 		"2:	ldq_u %1,0(%5)\n"
604 		"	insqh %6,%5,%4\n"
605 		"	insql %6,%5,%3\n"
606 		"	mskqh %2,%5,%2\n"
607 		"	mskql %1,%5,%1\n"
608 		"	or %2,%4,%2\n"
609 		"	or %1,%3,%1\n"
610 		"3:	stq_u %2,7(%5)\n"
611 		"4:	stq_u %1,0(%5)\n"
612 		"5:\n"
613 		".section __ex_table,\"a\"\n\t"
614 		"	.long 1b - .\n"
615 		"	lda %2,5b-1b(%0)\n"
616 		"	.long 2b - .\n"
617 		"	lda %1,5b-2b(%0)\n"
618 		"	.long 3b - .\n"
619 		"	lda $31,5b-3b(%0)\n"
620 		"	.long 4b - .\n"
621 		"	lda $31,5b-4b(%0)\n"
622 		".previous"
623 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
624 			  "=&r"(tmp3), "=&r"(tmp4)
625 			: "r"(va), "r"(una_reg(reg)), "0"(0));
626 		if (error)
627 			goto got_exception;
628 		return;
629 	}
630 
631 	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
632 		pc, va, opcode, reg);
633 	do_exit(SIGSEGV);
634 
635 got_exception:
636 	/* Ok, we caught the exception, but we don't want it.  Is there
637 	   someone to pass it along to?  */
638 	if ((fixup = search_exception_tables(pc)) != 0) {
639 		unsigned long newpc;
640 		newpc = fixup_exception(una_reg, fixup, pc);
641 
642 		printk("Forwarding unaligned exception at %lx (%lx)\n",
643 		       pc, newpc);
644 
645 		regs->pc = newpc;
646 		return;
647 	}
648 
649 	/*
650 	 * Yikes!  No one to forward the exception to.
651 	 * Since the registers are in a weird format, dump them ourselves.
652  	 */
653 
654 	printk("%s(%d): unhandled unaligned exception\n",
655 	       current->comm, task_pid_nr(current));
656 
657 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
658 	       pc, una_reg(26), regs->ps);
659 	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
660 	       una_reg(0), una_reg(1), una_reg(2));
661 	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
662  	       una_reg(3), una_reg(4), una_reg(5));
663 	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
664 	       una_reg(6), una_reg(7), una_reg(8));
665 	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
666 	       una_reg(9), una_reg(10), una_reg(11));
667 	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
668 	       una_reg(12), una_reg(13), una_reg(14));
669 	printk("r15= %016lx\n", una_reg(15));
670 	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
671 	       una_reg(16), una_reg(17), una_reg(18));
672 	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
673  	       una_reg(19), una_reg(20), una_reg(21));
674  	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
675 	       una_reg(22), una_reg(23), una_reg(24));
676 	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
677 	       una_reg(25), una_reg(27), una_reg(28));
678 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
679 
680 	dik_show_code((unsigned int *)pc);
681 	dik_show_trace((unsigned long *)(regs+1));
682 
683 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
684 		printk("die_if_kernel recursion detected.\n");
685 		local_irq_enable();
686 		while (1);
687 	}
688 	do_exit(SIGSEGV);
689 }
690 
691 /*
692  * Convert an s-floating point value in memory format to the
693  * corresponding value in register format.  The exponent
694  * needs to be remapped to preserve non-finite values
695  * (infinities, not-a-numbers, denormals).
696  */
697 static inline unsigned long
698 s_mem_to_reg (unsigned long s_mem)
699 {
700 	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
701 	unsigned long sign    = (s_mem >> 31) & 0x1;
702 	unsigned long exp_msb = (s_mem >> 30) & 0x1;
703 	unsigned long exp_low = (s_mem >> 23) & 0x7f;
704 	unsigned long exp;
705 
706 	exp = (exp_msb << 10) | exp_low;	/* common case */
707 	if (exp_msb) {
708 		if (exp_low == 0x7f) {
709 			exp = 0x7ff;
710 		}
711 	} else {
712 		if (exp_low == 0x00) {
713 			exp = 0x000;
714 		} else {
715 			exp |= (0x7 << 7);
716 		}
717 	}
718 	return (sign << 63) | (exp << 52) | (frac << 29);
719 }
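/* A worked example (illustration only): 1.0f is 0x3f800000 in memory,
   giving sign=0, exp_msb=0, exp_low=0x7f, frac=0; the exponent is
   remapped to 0x7f | (0x7 << 7) = 0x3ff, so the register image is
   0x3ff0000000000000 -- exactly 1.0 in T-float format.  Non-finite
   values survive too: 0x7f800000 (+inf) becomes 0x7ff0000000000000.  */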
720 
721 /*
722  * Convert an s-floating point value in register format to the
723  * corresponding value in memory format.
724  */
725 static inline unsigned long
726 s_reg_to_mem (unsigned long s_reg)
727 {
728 	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
729 }
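/* The inverse of the above for the common case, e.g.
   s_reg_to_mem(0x3ff0000000000000) == 0x3f800000, so an lds followed
   by an sts round-trips the memory image.  */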
730 
731 /*
732  * Handle user-level unaligned fault.  Handling user-level unaligned
733  * faults is *extremely* slow and produces nasty messages.  A user
734  * program *should* fix unaligned faults ASAP.
735  *
736  * Notice that we have (almost) the regular kernel stack layout here,
737  * so finding the appropriate registers is a little more difficult
738  * than in the kernel case.
739  *
740  * Finally, we handle regular integer load/stores only.  In
741  * particular, load-linked/store-conditionally and floating point
742  * load/stores are not supported.  The former make no sense with
743  * unaligned faults (they are guaranteed to fail) and I don't think
744  * the latter will occur in any decent program.
745  *
746  * Sigh. We *do* have to handle some FP operations, because GCC will
747  * use them as temporary storage for integer memory-to-memory copies.
748  * However, we need to deal with stt/ldt and sts/lds only.
749  */
750 
751 #define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
752 			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
753 			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
754 			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
755 
756 #define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
757 			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
758 			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
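/* For example, an unaligned ldl traps with opcode 0x28, so
   (1L << 0x28) & OP_INT_MASK is non-zero and the result is written
   through reg_addr below; ldt (opcode 0x23) is not in the mask and is
   handled via alpha_write_fp_reg() instead.  */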
759 
760 #define R(x)	((size_t) &((struct pt_regs *)0)->x)
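/* R(x) is just offsetof(struct pt_regs, x) spelled out by hand.  */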
761 
762 static int unauser_reg_offsets[32] = {
763 	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
764 	/* r9 ... r15 are stored in front of regs.  */
765 	-56, -48, -40, -32, -24, -16, -8,
766 	R(r16), R(r17), R(r18),
767 	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
768 	R(r27), R(r28), R(gp),
769 	0, 0
770 };
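/* Example: an unaligned load into $10 (s1) uses
   unauser_reg_offsets[10] == -48, i.e. the saved value sits six
   quadwords below the struct pt_regs frame, in the r9..r15 block
   stored in front of regs; $0 (v0) uses R(r0), an ordinary offset
   inside pt_regs.  */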
771 
772 #undef R
773 
774 asmlinkage void
775 do_entUnaUser(void __user * va, unsigned long opcode,
776 	      unsigned long reg, struct pt_regs *regs)
777 {
778 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
779 
780 	unsigned long tmp1, tmp2, tmp3, tmp4;
781 	unsigned long fake_reg, *reg_addr = &fake_reg;
782 	siginfo_t info;
783 	long error;
784 
785 	/* Check the UAC bits to decide what the user wants us to do
786 	   with the unaligned access.  */
787 
788 	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
789 		if (__ratelimit(&ratelimit)) {
790 			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
791 			       current->comm, task_pid_nr(current),
792 			       regs->pc - 4, va, opcode, reg);
793 		}
794 	}
795 	if ((current_thread_info()->status & TS_UAC_SIGBUS))
796 		goto give_sigbus;
797 	/* Not sure why you'd want to use this, but... */
798 	if ((current_thread_info()->status & TS_UAC_NOFIX))
799 		return;
800 
801 	/* Don't bother reading ds in the access check since we already
802 	   know that this came from the user.  Also rely on the fact that
803 	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
804 	if (!__access_ok((unsigned long)va, 0, USER_DS))
805 		goto give_sigsegv;
806 
807 	++unaligned[1].count;
808 	unaligned[1].va = (unsigned long)va;
809 	unaligned[1].pc = regs->pc - 4;
810 
811 	if ((1L << opcode) & OP_INT_MASK) {
812 		/* it's an integer load/store */
813 		if (reg < 30) {
814 			reg_addr = (unsigned long *)
815 			  ((char *)regs + unauser_reg_offsets[reg]);
816 		} else if (reg == 30) {
817 			/* usp in PAL regs */
818 			fake_reg = rdusp();
819 		} else {
820 			/* zero "register" */
821 			fake_reg = 0;
822 		}
823 	}
824 
825 	/* We don't want to use the generic get/put unaligned macros as
826 	   we want to trap exceptions.  Only if we actually get an
827 	   exception will we decide whether we should have caught it.  */
828 
829 	switch (opcode) {
830 	case 0x0c: /* ldwu */
831 		__asm__ __volatile__(
832 		"1:	ldq_u %1,0(%3)\n"
833 		"2:	ldq_u %2,1(%3)\n"
834 		"	extwl %1,%3,%1\n"
835 		"	extwh %2,%3,%2\n"
836 		"3:\n"
837 		".section __ex_table,\"a\"\n"
838 		"	.long 1b - .\n"
839 		"	lda %1,3b-1b(%0)\n"
840 		"	.long 2b - .\n"
841 		"	lda %2,3b-2b(%0)\n"
842 		".previous"
843 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
844 			: "r"(va), "0"(0));
845 		if (error)
846 			goto give_sigsegv;
847 		*reg_addr = tmp1|tmp2;
848 		break;
849 
850 	case 0x22: /* lds */
851 		__asm__ __volatile__(
852 		"1:	ldq_u %1,0(%3)\n"
853 		"2:	ldq_u %2,3(%3)\n"
854 		"	extll %1,%3,%1\n"
855 		"	extlh %2,%3,%2\n"
856 		"3:\n"
857 		".section __ex_table,\"a\"\n"
858 		"	.long 1b - .\n"
859 		"	lda %1,3b-1b(%0)\n"
860 		"	.long 2b - .\n"
861 		"	lda %2,3b-2b(%0)\n"
862 		".previous"
863 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
864 			: "r"(va), "0"(0));
865 		if (error)
866 			goto give_sigsegv;
867 		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
868 		return;
869 
870 	case 0x23: /* ldt */
871 		__asm__ __volatile__(
872 		"1:	ldq_u %1,0(%3)\n"
873 		"2:	ldq_u %2,7(%3)\n"
874 		"	extql %1,%3,%1\n"
875 		"	extqh %2,%3,%2\n"
876 		"3:\n"
877 		".section __ex_table,\"a\"\n"
878 		"	.long 1b - .\n"
879 		"	lda %1,3b-1b(%0)\n"
880 		"	.long 2b - .\n"
881 		"	lda %2,3b-2b(%0)\n"
882 		".previous"
883 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
884 			: "r"(va), "0"(0));
885 		if (error)
886 			goto give_sigsegv;
887 		alpha_write_fp_reg(reg, tmp1|tmp2);
888 		return;
889 
890 	case 0x28: /* ldl */
891 		__asm__ __volatile__(
892 		"1:	ldq_u %1,0(%3)\n"
893 		"2:	ldq_u %2,3(%3)\n"
894 		"	extll %1,%3,%1\n"
895 		"	extlh %2,%3,%2\n"
896 		"3:\n"
897 		".section __ex_table,\"a\"\n"
898 		"	.long 1b - .\n"
899 		"	lda %1,3b-1b(%0)\n"
900 		"	.long 2b - .\n"
901 		"	lda %2,3b-2b(%0)\n"
902 		".previous"
903 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
904 			: "r"(va), "0"(0));
905 		if (error)
906 			goto give_sigsegv;
907 		*reg_addr = (int)(tmp1|tmp2);
908 		break;
909 
910 	case 0x29: /* ldq */
911 		__asm__ __volatile__(
912 		"1:	ldq_u %1,0(%3)\n"
913 		"2:	ldq_u %2,7(%3)\n"
914 		"	extql %1,%3,%1\n"
915 		"	extqh %2,%3,%2\n"
916 		"3:\n"
917 		".section __ex_table,\"a\"\n"
918 		"	.long 1b - .\n"
919 		"	lda %1,3b-1b(%0)\n"
920 		"	.long 2b - .\n"
921 		"	lda %2,3b-2b(%0)\n"
922 		".previous"
923 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
924 			: "r"(va), "0"(0));
925 		if (error)
926 			goto give_sigsegv;
927 		*reg_addr = tmp1|tmp2;
928 		break;
929 
930 	/* Note that the store sequences do not indicate that they change
931 	   memory because it _should_ be affecting nothing in this context.
932 	   (Otherwise we have other, much larger, problems.)  */
933 	case 0x0d: /* stw */
934 		__asm__ __volatile__(
935 		"1:	ldq_u %2,1(%5)\n"
936 		"2:	ldq_u %1,0(%5)\n"
937 		"	inswh %6,%5,%4\n"
938 		"	inswl %6,%5,%3\n"
939 		"	mskwh %2,%5,%2\n"
940 		"	mskwl %1,%5,%1\n"
941 		"	or %2,%4,%2\n"
942 		"	or %1,%3,%1\n"
943 		"3:	stq_u %2,1(%5)\n"
944 		"4:	stq_u %1,0(%5)\n"
945 		"5:\n"
946 		".section __ex_table,\"a\"\n"
947 		"	.long 1b - .\n"
948 		"	lda %2,5b-1b(%0)\n"
949 		"	.long 2b - .\n"
950 		"	lda %1,5b-2b(%0)\n"
951 		"	.long 3b - .\n"
952 		"	lda $31,5b-3b(%0)\n"
953 		"	.long 4b - .\n"
954 		"	lda $31,5b-4b(%0)\n"
955 		".previous"
956 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
957 			  "=&r"(tmp3), "=&r"(tmp4)
958 			: "r"(va), "r"(*reg_addr), "0"(0));
959 		if (error)
960 			goto give_sigsegv;
961 		return;
962 
963 	case 0x26: /* sts */
964 		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
965 		/* FALLTHRU */
966 
967 	case 0x2c: /* stl */
968 		__asm__ __volatile__(
969 		"1:	ldq_u %2,3(%5)\n"
970 		"2:	ldq_u %1,0(%5)\n"
971 		"	inslh %6,%5,%4\n"
972 		"	insll %6,%5,%3\n"
973 		"	msklh %2,%5,%2\n"
974 		"	mskll %1,%5,%1\n"
975 		"	or %2,%4,%2\n"
976 		"	or %1,%3,%1\n"
977 		"3:	stq_u %2,3(%5)\n"
978 		"4:	stq_u %1,0(%5)\n"
979 		"5:\n"
980 		".section __ex_table,\"a\"\n"
981 		"	.long 1b - .\n"
982 		"	lda %2,5b-1b(%0)\n"
983 		"	.long 2b - .\n"
984 		"	lda %1,5b-2b(%0)\n"
985 		"	.long 3b - .\n"
986 		"	lda $31,5b-3b(%0)\n"
987 		"	.long 4b - .\n"
988 		"	lda $31,5b-4b(%0)\n"
989 		".previous"
990 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
991 			  "=&r"(tmp3), "=&r"(tmp4)
992 			: "r"(va), "r"(*reg_addr), "0"(0));
993 		if (error)
994 			goto give_sigsegv;
995 		return;
996 
997 	case 0x27: /* stt */
998 		fake_reg = alpha_read_fp_reg(reg);
999 		/* FALLTHRU */
1000 
1001 	case 0x2d: /* stq */
1002 		__asm__ __volatile__(
1003 		"1:	ldq_u %2,7(%5)\n"
1004 		"2:	ldq_u %1,0(%5)\n"
1005 		"	insqh %6,%5,%4\n"
1006 		"	insql %6,%5,%3\n"
1007 		"	mskqh %2,%5,%2\n"
1008 		"	mskql %1,%5,%1\n"
1009 		"	or %2,%4,%2\n"
1010 		"	or %1,%3,%1\n"
1011 		"3:	stq_u %2,7(%5)\n"
1012 		"4:	stq_u %1,0(%5)\n"
1013 		"5:\n"
1014 		".section __ex_table,\"a\"\n\t"
1015 		"	.long 1b - .\n"
1016 		"	lda %2,5b-1b(%0)\n"
1017 		"	.long 2b - .\n"
1018 		"	lda %1,5b-2b(%0)\n"
1019 		"	.long 3b - .\n"
1020 		"	lda $31,5b-3b(%0)\n"
1021 		"	.long 4b - .\n"
1022 		"	lda $31,5b-4b(%0)\n"
1023 		".previous"
1024 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
1025 			  "=&r"(tmp3), "=&r"(tmp4)
1026 			: "r"(va), "r"(*reg_addr), "0"(0));
1027 		if (error)
1028 			goto give_sigsegv;
1029 		return;
1030 
1031 	default:
1032 		/* What instruction were you trying to use, exactly?  */
1033 		goto give_sigbus;
1034 	}
1035 
1036 	/* Only integer loads should get here; everyone else returns early. */
1037 	if (reg == 30)
1038 		wrusp(fake_reg);
1039 	return;
1040 
1041 give_sigsegv:
1042 	regs->pc -= 4;  /* make pc point to faulting insn */
1043 	info.si_signo = SIGSEGV;
1044 	info.si_errno = 0;
1045 
1046 	/* We need to replicate some of the logic in mm/fault.c,
1047 	   since we don't have access to the fault code in the
1048 	   exception handling return path.  */
1049 	if (!__access_ok((unsigned long)va, 0, USER_DS))
1050 		info.si_code = SEGV_ACCERR;
1051 	else {
1052 		struct mm_struct *mm = current->mm;
1053 		down_read(&mm->mmap_sem);
1054 		if (find_vma(mm, (unsigned long)va))
1055 			info.si_code = SEGV_ACCERR;
1056 		else
1057 			info.si_code = SEGV_MAPERR;
1058 		up_read(&mm->mmap_sem);
1059 	}
1060 	info.si_addr = va;
1061 	send_sig_info(SIGSEGV, &info, current);
1062 	return;
1063 
1064 give_sigbus:
1065 	regs->pc -= 4;
1066 	info.si_signo = SIGBUS;
1067 	info.si_errno = 0;
1068 	info.si_code = BUS_ADRALN;
1069 	info.si_addr = va;
1070 	send_sig_info(SIGBUS, &info, current);
1071 	return;
1072 }
1073 
1074 void
1075 trap_init(void)
1076 {
1077 	/* Tell PAL-code what global pointer we want in the kernel.  */
1078 	register unsigned long gptr __asm__("$29");
1079 	wrkgp(gptr);
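	/* gptr is deliberately never written: binding it to $29 just reads
	   the kernel gp already established at boot, and wrkgp() hands that
	   value to the PALcode.  */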
1080 
1081 	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1082 	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
1083 	if (implver() == IMPLVER_EV4)
1084 		opDEC_check();
1085 
1086 	wrent(entArith, 1);
1087 	wrent(entMM, 2);
1088 	wrent(entIF, 3);
1089 	wrent(entUna, 4);
1090 	wrent(entSys, 5);
1091 	wrent(entDbg, 6);
1092 }
1093