xref: /linux/arch/alpha/kernel/traps.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * arch/alpha/kernel/traps.c
3  *
4  * (C) Copyright 1994 Linus Torvalds
5  */
6 
7 /*
8  * This file initializes the trap entry points
9  */
10 
11 #include <linux/jiffies.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/delay.h>
16 #include <linux/smp_lock.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/kallsyms.h>
20 #include <linux/ratelimit.h>
21 
22 #include <asm/gentrap.h>
23 #include <asm/uaccess.h>
24 #include <asm/unaligned.h>
25 #include <asm/sysinfo.h>
26 #include <asm/hwrpb.h>
27 #include <asm/mmu_context.h>
28 
29 #include "proto.h"
30 
/* Work-around for some SRMs which mishandle opDEC faults.  */

/* PC adjustment (0 or 4) to apply to opDEC faults; set once at boot
   by opDEC_check() below and consumed in do_entIF case 4.  */
static int opDEC_fix;

/*
 * Probe whether this SRM reports opDEC faults with the PC of the
 * faulting instruction (buggy) or the instruction after it, as the
 * architecture requires.  Deliberately takes one opDEC fault and
 * records in opDEC_fix how far the reported PC must be advanced.
 */
static void __cpuinit
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue.  */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler.  */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0.  */
	"	lda %[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}
63 
/*
 * Print the integer register state for an oops/trap report.
 * @regs:  trap frame (pt_regs layout).
 * @r9_15: when non-NULL, the callee-saved registers s0-s6 spilled
 *         ahead of the frame; indexed here as r9_15[9]..[15].
 */
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	print_symbol("pc is at %s\n", regs->pc);
	print_symbol("ra is at %s\n", regs->r26 );
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
 	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
 	       regs->r19, regs->r20, regs->r21);
 	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	/* regs+1 is the first address past the frame, i.e. the sp
	   at the time of the trap.  */
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}
99 
#if 0
/* Integer register names in register-number order; disabled, kept
   only as a debugging reference for the dumps above.  */
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
#endif
106 
/*
 * Dump the eight instruction words around "pc" (six before, the
 * faulting word bracketed in <>, one after).  Reads via __get_user
 * so an unmapped address simply truncates the dump.
 */
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
121 
/*
 * Walk up the remainder of the current (8KB) stack page, printing
 * every word that falls inside the kernel text as a probable return
 * address.  The walk ends at the top of the stack page or after the
 * ~40-entry cap below.
 */
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>]", tmp);
		print_symbol(" %s", tmp);
		printk("\n");
		/* Count printed entries; previously "i" was never
		   incremented, so this cap could never trigger.  */
		if (++i > 40) {
			printk(" ...");
			break;
		}
	}
	printk("\n");
}
145 
146 static int kstack_depth_to_print = 24;
147 
148 void show_stack(struct task_struct *task, unsigned long *sp)
149 {
150 	unsigned long *stack;
151 	int i;
152 
153 	/*
154 	 * debugging aid: "show_stack(NULL);" prints the
155 	 * back trace for this cpu.
156 	 */
157 	if(sp==NULL)
158 		sp=(unsigned long*)&sp;
159 
160 	stack = sp;
161 	for(i=0; i < kstack_depth_to_print; i++) {
162 		if (((long) stack & (THREAD_SIZE-1)) == 0)
163 			break;
164 		if (i && ((i % 4) == 0))
165 			printk("\n       ");
166 		printk("%016lx ", *stack++);
167 	}
168 	printk("\n");
169 	dik_show_trace(sp);
170 }
171 
/* Print this cpu's own stack dump and back trace from right here.  */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);
178 
/*
 * Oops path: dump state and kill the task when a trap was taken in
 * kernel mode.  Returns without doing anything when ps bit 3 is set
 * (i.e. the trap did not come from the kernel).
 */
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE);
	/* regs+1 is the sp at trap time; see dik_show_regs.  */
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	/* Guard against recursive oopses: if we die again on this
	   thread, spin instead of recursing forever.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
200 
#ifndef CONFIG_MATHEMU
/* No FP emulator configured in: substitute stubs returning 0, which
   the trap handlers below treat as "handled, no signal to send".  */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
#else
/* Real emulator entry points, provided elsewhere when CONFIG_MATHEMU
   is set.  They return 0 for success or a si_code for SIGFPE.  */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
211 
/*
 * Arithmetic trap entry.  When the software-completion bit (bit 0)
 * of the exception summary is set, let the FP emulator complete the
 * instruction; otherwise, or when emulation returns a si_code,
 * deliver SIGFPE (after oopsing if this happened in the kernel).
 */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;
	siginfo_t info;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGFPE, &info, current);
}
238 
/*
 * Instruction fault entry.  "type" is the fault subtype from the
 * PALcode: 0 breakpoint, 1 bugcheck, 2 gentrap, 3 FEN fault,
 * 4 opDEC (illegal/reserved opcode), 5 illoc.  Kernel-mode faults
 * oops; user-mode faults are translated into signals.
 */
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	siginfo_t info;
	int signo, code;

	if ((regs->ps & ~IPL_MAX) == 0) {
		if (type == 1) {
			/* BUG() encodes the source line and a pointer to
			   the file name in the two words after the trap.  */
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	      case 0: /* breakpoint */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_trapno = 0;
		info.si_addr = (void __user *) regs->pc;

		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 1: /* bugcheck */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = __SI_FAULT;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
		return;

	      case 2: /* gentrap */
		/* The gentrap code is passed in r16; map it to the
		   matching signal/si_code pair.  */
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = regs->r16;
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = __SI_FAULT;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = __SI_FAULT;
			break;
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void __user *) regs->pc;
		send_sig_info(signo, &info, current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* Some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				info.si_signo = SIGFPE;
				info.si_errno = 0;
				info.si_code = si_code;
				info.si_addr = (void __user *) regs->pc;
				send_sig_info(SIGFPE, &info, current);
				return;
			}
			/* si_code < 0: fall through to SIGILL below.  */
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	/* Anything not handled above is an illegal instruction.  */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGILL, &info, current);
}
404 
405 /* There is an ifdef in the PALcode in MILO that enables a
406    "kernel debugging entry point" as an unprivileged call_pal.
407 
408    We don't want to have anything to do with it, but unfortunately
409    several versions of MILO included in distributions have it enabled,
410    and if we don't put something on the entry point we'll oops.  */
411 
asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	siginfo_t info;

	/* Fatal in kernel mode; user mode gets a forced SIGILL.  */
	die_if_kernel("Instruction fault", regs, 0, NULL);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	force_sig_info(SIGILL, &info, current);
}
425 
426 
427 /*
428  * entUna has a different register layout to be reasonably simple. It
429  * needs access to all the integer registers (the kernel doesn't use
430  * fp-regs), and it needs to have them in order for simpler access.
431  *
432  * Due to the non-standard register layout (and because we don't want
433  * to handle floating-point regs), user-mode unaligned accesses are
434  * handled separately by do_entUnaUser below.
435  *
436  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
437  * on a gp-register unaligned load/store, something is _very_ wrong
438  * in the kernel anyway..
439  */
struct allregs {
	unsigned long regs[32];		/* integer regs in register-number order */
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics: [0] kernel faults (do_entUna),
   [1] user faults (do_entUnaUser).  */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.
   At trap time r16-r18 live in the trailing a0/a1/a2 slots of
   struct allregs (indices 35-37), hence the +19 remapping.  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
452 
453 
/*
 * Fix up an unaligned load/store taken in kernel mode.  Only the
 * integer load/store opcodes are emulated (via ldq_u/stq_u byte
 * extraction/insertion sequences with exception-table fixups); any
 * other opcode, or an unexpected fault with no fixup entry, is fatal.
 *
 * @va:     the unaligned virtual address.
 * @opcode: the Alpha opcode of the faulting instruction.
 * @reg:    the target/source register number.
 * @regs:   the special entUna register layout (see struct allregs).
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;	/* pc of the faulting insn */
	unsigned long *_regs = regs->regs;	/* used by una_reg() */
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* (int) cast sign-extends the 32-bit value, as ldl does.  */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n\t"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	/* Unhandled opcode in kernel mode: fatal.  */
	lock_kernel();
	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		/* NOTE(review): fixup_exception apparently expands
		   una_reg itself to patch the right register slot.  */
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
 	 */
	lock_kernel();

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
 	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
 	       una_reg(19), una_reg(20), una_reg(21));
 	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1));

	/* Same recursion guard as die_if_kernel.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
687 
688 /*
689  * Convert an s-floating point value in memory format to the
690  * corresponding value in register format.  The exponent
691  * needs to be remapped to preserve non-finite values
692  * (infinities, not-a-numbers, denormals).
693  */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long sign = (s_mem >> 31) & 0x1;
	unsigned long exp8 = (s_mem >> 23) & 0xff;	/* full 8-bit S exponent */
	unsigned long frac = s_mem & 0x7fffff;
	unsigned long exp;

	if (exp8 == 0xff)		/* Inf / NaN: all-ones exponent */
		exp = 0x7ff;
	else if (exp8 == 0x00)		/* zero / denormal */
		exp = 0x000;
	else				/* finite: rebias 127 -> 1023 */
		exp = exp8 + (1023 - 127);

	/* Assemble the T-format register image: sign, 11-bit exponent,
	   and the fraction shifted up to the double position.  */
	return (sign << 63) | (exp << 52) | (frac << 29);
}
717 
718 /*
719  * Convert an s-floating point value in register format to the
720  * corresponding value in memory format.
721  */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	/* Drop bits <61:59> of the register-format exponent and pack
	   sign + exponent MSB above the remaining 30 bits.  */
	unsigned long hi = s_reg >> 62;
	unsigned long lo = (s_reg >> 29) & 0x3fffffff;

	return (hi << 30) | lo;
}
727 
728 /*
729  * Handle user-level unaligned fault.  Handling user-level unaligned
730  * faults is *extremely* slow and produces nasty messages.  A user
731  * program *should* fix unaligned faults ASAP.
732  *
733  * Notice that we have (almost) the regular kernel stack layout here,
734  * so finding the appropriate registers is a little more difficult
735  * than in the kernel case.
736  *
737  * Finally, we handle regular integer load/stores only.  In
738  * particular, load-linked/store-conditionally and floating point
739  * load/stores are not supported.  The former make no sense with
740  * unaligned faults (they are guaranteed to fail) and I don't think
741  * the latter will occur in any decent program.
742  *
 * Sigh. We *do* have to handle some FP operations, because GCC will
 * use them as temporary storage for integer memory to memory copies.
 * However, we need to deal with stt/ldt and sts/lds only.
746  */
747 
/* Bitmask, indexed by opcode, of the integer load/store opcodes.  */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask of the opcodes that write memory (stores).  */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

/* Byte offset of register x's save slot within struct pt_regs.  */
#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/* Offsets, relative to a struct pt_regs, of each integer register's
   save slot for the user unaligned handler.  r30 (usp) and r31
   (zero) are handled specially and get dummy 0 entries.  */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R
770 
/*
 * Fix up an unaligned access taken in user mode.  Integer loads and
 * stores, plus lds/sts/ldt/stt, are emulated; everything else gets
 * SIGBUS.  Behavior is tunable per-thread via the TIF_UAC_* flags
 * (suppress the message, send SIGBUS instead, or skip the fixup).
 */
asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	/* Rate-limit the warning to 5 lines per 5 seconds.  */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	/* Registers we can't address via pt_regs (usp, zero, FP data)
	   are staged through fake_reg instead.  */
	unsigned long fake_reg, *reg_addr = &fake_reg;
	siginfo_t info;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!test_thread_flag (TIF_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if (test_thread_flag (TIF_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if (test_thread_flag (TIF_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if (!__access_ok((unsigned long)va, 0, USER_DS))
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		/* Widen the S-float memory image to register format.  */
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		/* (int) cast sign-extends, matching ldl semantics.  */
		*reg_addr = (int)(tmp1|tmp2);
		break;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %1,3b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %2,3b-2b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x26: /* sts */
		/* Narrow the register image to S-float memory format,
		   then share the stl store sequence below.  */
		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
		/* FALLTHRU */

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x27: /* stt */
		fake_reg = alpha_read_fp_reg(reg);
		/* FALLTHRU */

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		".section __ex_table,\"a\"\n\t"
		"	.long 1b - .\n"
		"	lda %2,5b-1b(%0)\n"
		"	.long 2b - .\n"
		"	lda %1,5b-2b(%0)\n"
		"	.long 3b - .\n"
		"	lda $31,5b-3b(%0)\n"
		"	.long 4b - .\n"
		"	lda $31,5b-4b(%0)\n"
		".previous"
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	default:
		/* What instruction were you trying to use, exactly?  */
		goto give_sigbus;
	}

	/* Only integer loads should get here; everyone else returns early. */
	if (reg == 30)
		wrusp(fake_reg);
	return;

give_sigsegv:
	regs->pc -= 4;  /* make pc point to faulting insn */
	info.si_signo = SIGSEGV;
	info.si_errno = 0;

	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path.  */
	if (!__access_ok((unsigned long)va, 0, USER_DS))
		info.si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;
		down_read(&mm->mmap_sem);
		if (find_vma(mm, (unsigned long)va))
			info.si_code = SEGV_ACCERR;
		else
			info.si_code = SEGV_MAPERR;
		up_read(&mm->mmap_sem);
	}
	info.si_addr = va;
	send_sig_info(SIGSEGV, &info, current);
	return;

give_sigbus:
	regs->pc -= 4;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = va;
	send_sig_info(SIGBUS, &info, current);
	return;
}
1070 
/*
 * Install the kernel global pointer and the trap entry points via
 * the PALcode (wrkgp/wrent), and apply the EV4 SRM opDEC fixup.
 */
void __cpuinit
trap_init(void)
{
	/* Tell PAL-code what global pointer we want in the kernel.  */
	register unsigned long gptr __asm__("$29");
	wrkgp(gptr);

	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
	if (implver() == IMPLVER_EV4)
		opDEC_check();

	wrent(entArith, 1);	/* arithmetic trap */
	wrent(entMM, 2);	/* memory-management fault */
	wrent(entIF, 3);	/* instruction fault */
	wrent(entUna, 4);	/* unaligned access */
	wrent(entSys, 5);	/* system call */
	wrent(entDbg, 6);	/* MILO debug entry (see do_entDbg) */
}
1090