xref: /linux/arch/alpha/kernel/traps.c (revision c4101e55974cc7d835fbd2d8e01553a3f61e9e75)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * arch/alpha/kernel/traps.c
4  *
5  * (C) Copyright 1994 Linus Torvalds
6  */
7 
8 /*
9  * This file initializes the trap entry points
10  */
11 
12 #include <linux/cpu.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched/signal.h>
16 #include <linux/sched/debug.h>
17 #include <linux/tty.h>
18 #include <linux/delay.h>
19 #include <linux/extable.h>
20 #include <linux/kallsyms.h>
21 #include <linux/ratelimit.h>
22 
23 #include <asm/gentrap.h>
24 #include <linux/uaccess.h>
25 #include <asm/unaligned.h>
26 #include <asm/sysinfo.h>
27 #include <asm/hwrpb.h>
28 #include <asm/mmu_context.h>
29 #include <asm/special_insns.h>
30 
31 #include "proto.h"
32 
/* Work-around for some SRMs which mishandle opDEC faults.  */

/* PC adjustment (0 or 4) applied on opDEC faults; set once at boot
   by opDEC_check() below and consumed in do_entIF().  */
static int opDEC_fix;

/*
 * Probe whether this SRM reports the opDEC fault PC correctly.
 * Architecturally the reported PC must point one past the faulting
 * instruction; buggy SRMs report the faulting PC itself.  We trigger
 * an opDEC fault on purpose and record which PC arrived, leaving the
 * needed delta in opDEC_fix.
 */
static void
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue.  */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler.  */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0.  */
	"	lda %[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}
65 
/*
 * Dump the integer register state from a trap frame, using the
 * Alpha software register names (v0, t0-t11, s0-s6, a0-a5, ...).
 * @r9_15: optional pointer to the callee-saved r9-r15 block (indexed
 *         by register number, i.e. r9_15[9] is s0); may be NULL when
 *         the saved registers are not available.
 */
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	printk("pc is at %pSR\n", (void *)regs->pc);
	printk("ra is at %pSR\n", (void *)regs->r26);
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	/* The stack pointer at trap time is the word just past the frame. */
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}
101 
102 #if 0
103 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
104 			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
105 			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
106 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
107 #endif
108 
/*
 * Dump the instruction words around the faulting PC: six before,
 * the faulting one (bracketed with <...>), and one after.
 */
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		/* The PC may be bogus; stop at the first unreadable word.  */
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
123 
/*
 * Walk the stack upward from @sp, printing every word that looks
 * like a kernel text address.  The 0x1ff8 mask terminates the walk
 * at the aligned top of the kernel stack page; the entry counter
 * additionally caps the output at roughly 40 frames.
 *
 * Fix: the frame counter was never incremented, so the "..."
 * truncation limit could never fire.
 */
static void
dik_show_trace(unsigned long *sp, const char *loglvl)
{
	long i = 0;
	printk("%sTrace:\n", loglvl);
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (!is_kernel_text(tmp))
			continue;
		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
		if (i++ > 40) {
			printk("%s ...", loglvl);
			break;
		}
	}
	printk("%s\n", loglvl);
}
143 
static int kstack_depth_to_print = 24;	/* max raw stack words dumped */

/*
 * Print a raw hex dump of the stack (four words per line) followed by
 * a backtrace.  @task is unused here; @sp may be NULL to mean "this
 * CPU's current stack".
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
	 * back trace for this cpu.
	 */
	if(sp==NULL)
		sp=(unsigned long*)&sp;

	stack = sp;
	for(i=0; i < kstack_depth_to_print; i++) {
		/* Stop at the THREAD_SIZE-aligned end of the stack.  */
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i % 4) == 0) {
			if (i)
				pr_cont("\n");
			printk("%s       ", loglvl);
		} else {
			pr_cont(" ");
		}
		pr_cont("%016lx", *stack++);
	}
	pr_cont("\n");
	dik_show_trace(sp, loglvl);
}
174 
/*
 * If the trap came from kernel mode, dump registers, code and a
 * backtrace, then kill the task.  Returns without doing anything for
 * user-mode traps, letting the caller deliver a signal instead.
 */
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	/* PS bit 3 set means the trap was taken from user mode.  */
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
	dik_show_code((unsigned int *)regs->pc);

	/* Guard against recursive faults while dying.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	make_task_dead(SIGSEGV);
}
196 
#ifndef CONFIG_MATHEMU
/* With no in-kernel FP emulator configured, stub out the emulation
   hooks with a function that reports "not handled" (0); a math-emu
   module may repoint these at load time.  */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
/* Built-in emulator: direct declarations, resolved at link time.  */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
209 
/*
 * Arithmetic trap entry point (entArith).  If the software-completion
 * bit is set in @summary, hand the faulting insn to the FP emulator;
 * otherwise (or if emulation fails) kill a kernel offender or raise
 * SIGFPE for user mode.
 */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		/* Zero means the emulator fully handled the trap.  */
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
}
231 
/*
 * Instruction fault entry point (entIF).  @type selects the fault
 * flavour as delivered by PALcode:
 *   0 = breakpoint (bpt), 1 = bugcheck, 2 = gentrap,
 *   3 = FEN (FP disabled), 4 = opDEC (illegal/reserved opcode),
 *   5 = illegal operand.
 * Kernel-mode faults die (except the WTINT and FEN special cases);
 * user-mode faults are translated into the appropriate signal.
 */
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	int signo, code;

	if (type == 3) { /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;
	}
	if (!user_mode(regs)) {
		if (type == 1) {
			/* BUG(): the two words after the bugcheck insn
			   hold the source file pointer; the first word
			   is the line number.  */
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
#ifdef CONFIG_ALPHA_WTINT
		if (type == 4) {
			/* If CALL_PAL WTINT is totally unsupported by the
			   PALcode, e.g. MILO, "emulate" it by overwriting
			   the insn.  */
			unsigned int *pinsn
			  = (unsigned int *) regs->pc - 1;
			if (*pinsn == PAL_wtint) {
				*pinsn = 0x47e01400; /* mov 0,$0 */
				imb();
				regs->r0 = 0;
				return;
			}
		}
#endif /* ALPHA_WTINT */
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	      case 0: /* breakpoint */
		/* If ptrace planted this breakpoint, remove it and
		   rewind the PC to the original instruction.  */
		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
			       current);
		return;

	      case 1: /* bugcheck */
		send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
				      (void __user *) regs->pc, 0, current);
		return;

	      case 2: /* gentrap */
		/* Map the gentrap code (in a0/r16) to a signal/si_code
		   pair; unrecognized codes become SIGTRAP/TRAP_UNK.  */
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = FPE_FLTUNK;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = TRAP_UNK;
			break;
		}

		send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
				      regs->r16, current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* The some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				send_sig_fault_trapno(SIGFPE, si_code,
						      (void __user *) regs->pc,
						      0, current);
				return;
			}
		}
		break;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	/* Fall-through for anything not handled above: illegal opcode.  */
	send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
}
391 
392 /* There is an ifdef in the PALcode in MILO that enables a
393    "kernel debugging entry point" as an unprivileged call_pal.
394 
395    We don't want to have anything to do with it, but unfortunately
396    several versions of MILO included in distributions have it enabled,
397    and if we don't put something on the entry point we'll oops.  */
398 
/*
 * Debug entry point (entDbg); see the comment above.  Treat it as an
 * illegal instruction: die in kernel mode, SIGILL in user mode.
 */
asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	die_if_kernel("Instruction fault", regs, 0, NULL);

	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
}
406 
407 
408 /*
409  * entUna has a different register layout to be reasonably simple. It
410  * needs access to all the integer registers (the kernel doesn't use
411  * fp-regs), and it needs to have them in order for simpler access.
412  *
413  * Due to the non-standard register layout (and because we don't want
414  * to handle floating-point regs), user-mode unaligned accesses are
415  * handled separately by do_entUnaUser below.
416  *
417  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
418  * on a gp-register unaligned load/store, something is _very_ wrong
419  * in the kernel anyway..
420  */
/* Register layout pushed by the entUna entry code: r0-r31 in order,
   followed by the PAL-saved trap frame fields.  */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics (readable via osf_getsysinfo):
   index 0 counts kernel-mode fixups, index 1 user-mode fixups.  */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  */
/* r16-r18 (a0-a2) are held in the PAL-saved a0/a1/a2 slots at the
   end of struct allregs, hence the +19 index remap.  */
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
433 
434 
/*
 * Kernel-mode unaligned access fixup (entUna).  @va is the faulting
 * virtual address, @opcode the major opcode of the faulting insn,
 * @reg its target register number.  The supported load/store forms
 * are emulated with ldq_u/stq_u extract/insert/mask sequences; any
 * exception during emulation is forwarded through the exception
 * table, or the task is killed if no fixup exists.
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;	/* PC of the faulting insn */
	unsigned long *_regs = regs->regs;	/* base used by una_reg() */
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* ldl sign-extends the 32-bit result, hence the cast.  */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	/* Any other opcode is an unexpected kernel unaligned access.  */
	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	make_task_dead(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);

	/* Guard against recursive faults while dying.  */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	make_task_dead(SIGSEGV);
}
636 
637 /*
638  * Convert an s-floating point value in memory format to the
639  * corresponding value in register format.  The exponent
640  * needs to be remapped to preserve non-finite values
641  * (infinities, not-a-numbers, denormals).
642  */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	/* Break the 32-bit S-format value into its fields.  */
	unsigned long sign = (s_mem >> 31) & 0x1;
	unsigned long exp8 = (s_mem >> 23) & 0xff;	/* full 8-bit exponent */
	unsigned long frac = s_mem & 0x7fffff;
	unsigned long exp;

	/* Remap the 8-bit exponent onto the 11-bit register-format
	   exponent, preserving zeros/denormals and Inf/NaN.  */
	if (exp8 == 0xff)
		exp = 0x7ff;			/* Inf or NaN */
	else if (exp8 == 0x00)
		exp = 0x000;			/* zero or denormal */
	else if (exp8 & 0x80)
		exp = 0x400 | (exp8 & 0x7f);	/* exponent MSB set */
	else
		exp = 0x380 | exp8;		/* exponent MSB clear */

	return (sign << 63) | (exp << 52) | (frac << 29);
}
666 
667 /*
668  * Convert an s-floating point value in register format to the
669  * corresponding value in memory format.
670  */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	/* Sign and exponent MSB come from the top two register bits;
	   the remaining exponent bits and fraction are register bits
	   58..29.  Reassemble them into the 32-bit memory format.  */
	unsigned long top2  = s_reg >> 62;
	unsigned long low30 = (s_reg >> 29) & 0x3fffffff;

	return (top2 << 30) | low30;
}
676 
677 /*
678  * Handle user-level unaligned fault.  Handling user-level unaligned
679  * faults is *extremely* slow and produces nasty messages.  A user
680  * program *should* fix unaligned faults ASAP.
681  *
682  * Notice that we have (almost) the regular kernel stack layout here,
683  * so finding the appropriate registers is a little more difficult
684  * than in the kernel case.
685  *
686  * Finally, we handle regular integer load/stores only.  In
687  * particular, load-linked/store-conditionally and floating point
688  * load/stores are not supported.  The former make no sense with
689  * unaligned faults (they are guaranteed to fail) and I don't think
690  * the latter will occur in any decent program.
691  *
692  * Sigh. We *do* have to handle some FP operations, because GCC will
693  * uses them as temporary storage for integer memory to memory copies.
694  * However, we need to deal with stt/ldt and sts/lds only.
695  */
696 
/* Bitmask (indexed by major opcode) of the integer load/store forms
   handled by do_entUnaUser.  */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask of the store (write) opcodes, FP forms included.  */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/* Byte offset of each integer register relative to the pt_regs base;
   r9-r15 live on the stack just below pt_regs (negative offsets),
   and r30/r31 (usp/zero) are handled specially by the caller.  */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R
719 
/*
 * User-mode unaligned access fixup (entUna from user context).
 * Honors the per-thread UAC (unaligned access control) flags:
 * optionally print a rate-limited warning, deliver SIGBUS instead of
 * fixing up, or silently skip the fixup.  Handles the integer forms
 * plus lds/ldt/sts/stt (used by GCC for memory-to-memory copies);
 * anything else gets SIGBUS.  Faults during emulation become SIGSEGV.
 */
asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	/* At most 5 warnings per 5 seconds to avoid log flooding.  */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	/* reg_addr points at the real saved register slot, or at
	   fake_reg for r30 (usp), r31 (zero) and the FP cases.  */
	unsigned long fake_reg, *reg_addr = &fake_reg;
	int si_code;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if ((current_thread_info()->status & TS_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if ((current_thread_info()->status & TS_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		/* ldl sign-extends the 32-bit result, hence the cast.  */
		*reg_addr = (int)(tmp1|tmp2);
		break;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x26: /* sts */
		/* Convert the FP register to memory format, then fall
		   into the 32-bit store path with fake_reg as source.  */
		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
		fallthrough;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x27: /* stt */
		/* T-format needs no conversion; fall into the 64-bit
		   store path with fake_reg as source.  */
		fake_reg = alpha_read_fp_reg(reg);
		fallthrough;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	default:
		/* What instruction were you trying to use, exactly?  */
		goto give_sigbus;
	}

	/* Only integer loads should get here; everyone else returns early. */
	if (reg == 30)
		wrusp(fake_reg);
	return;

give_sigsegv:
	regs->pc -= 4;  /* make pc point to faulting insn */

	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path.  */
	if ((unsigned long)va >= TASK_SIZE)
		si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;
		mmap_read_lock(mm);
		if (find_vma(mm, (unsigned long)va))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(mm);
	}
	send_sig_fault(SIGSEGV, si_code, va, current);
	return;

give_sigbus:
	regs->pc -= 4;
	send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
	return;
}
974 
/*
 * Install the kernel global pointer and the PALcode trap entry
 * points.  Called during boot.
 */
void
trap_init(void)
{
	/* Tell PAL-code what global pointer we want in the kernel.  */
	register unsigned long gptr __asm__("$29");
	wrkgp(gptr);

	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
	if (implver() == IMPLVER_EV4)
		opDEC_check();

	wrent(entArith, 1);	/* arithmetic fault */
	wrent(entMM, 2);	/* memory-management fault */
	wrent(entIF, 3);	/* instruction fault */
	wrent(entUna, 4);	/* unaligned access */
	wrent(entSys, 5);	/* system call */
	wrent(entDbg, 6);	/* debug/bugcheck */
}
994