// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kfence.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>

#include "unaligned.h"

#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
#include <asm/spinlock.h>
#endif

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl);

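/*
 * Render the low nbits of x into buf as '0'/'1' characters,
 * most-significant bit first, NUL-terminated.
 */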
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

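/* Print four consecutive entries of register array r per line, labelled f (e.g. "r00-03"). */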
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	const char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, task_cpu(current), cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, regs, KERN_DEFAULT);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

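/*
 * Rate-limited message plus register dump; 'critical' forces output even
 * when show_unhandled_signals is off.
 */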
#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
		printk(fmt, ##__VA_ARGS__);				      \
		show_regs(regs);					      \
	}								      \
}


static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
	int i = 1;

	printk("%sBacktrace:\n", loglvl);
	while (i <= MAX_UNWIND_ENTRIES) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] %pS\n",
				loglvl, info->ip, (void *) info->ip);
			i++;
		}
	}
	printk("%s\n", loglvl);
}

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl)
{
	struct unwind_frame_info info;

	unwind_frame_init_task(&info, task, regs);

	do_show_stack(&info, loglvl);
}

void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
	parisc_show_stack(t, NULL, loglvl);
}

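/* Any kernel address may hold a BUG()/WARN() break insn, so never reject it. */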
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	bust_spinlocks(1);

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"                 (__)\\       )\\/\\\n"
			"                  U  ||----w |\n"
			"                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	make_task_dead(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	force_sig_fault(SIGTRAP, wot,
			(void __user *) (regs->iaoq[0] & ~3));
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef CONFIG_KPROBES
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
		parisc_kprobe_break_handler(regs);
		return;
	}
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
		parisc_kprobe_ss_handler(regs);
		return;
	}
#endif

#ifdef CONFIG_KGDB
	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
		kgdb_handle_exception(9, SIGTRAP, 0, regs);
		return;
	}
#endif

#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
		die_if_kernel("Spinlock was trashed", regs, 1);
	}
#endif

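	/* "break im5,im13": im5 lives in IIR bits 0..4, im13 in bits 13..25. */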
	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

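/*
 * Rebuild a pt_regs trap frame from the PIM data the firmware saved at
 * HPMC time, using the wide (PA 2.0) or narrow (PA 1.x) PIM layout.
 */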
static void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
	bust_spinlocks(1);

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info, KERN_CRIT);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
		msg, code, trap_name(code), offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	int si_code;

	if (!irqs_disabled_flags(regs->gr[0]))
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | PRIV_USER;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;

#ifdef CONFIG_KGDB
		if (kgdb_single_step) {
			kgdb_handle_exception(0, SIGTRAP, 0, regs);
			return;
		}
#endif

		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		default_trap(code, regs);
		return;

	case  PARISC_ITLB_TRAP:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

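			/* The 0x00200000 opcode bit selects cr27; otherwise cr26.
			 * The low 5 bits of the IIR name the destination GR. */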
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si_code = ILL_PRVREG;
	give_sigill:
		force_sig_fault(SIGILL, si_code,
				(void __user *) regs->iaoq[0]);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		force_sig_fault(SIGFPE, FPE_INTOVF,
				(void __user *) regs->iaoq[0]);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			force_sig_fault(SIGFPE, FPE_CONDTRAP,
					(void __user *) regs->iaoq[0]);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		fallthrough;
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		fallthrough;
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		if (code == 17 && handle_nadtlb_fault(regs))
			return;
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		fallthrough;
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		fallthrough;
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				mmap_read_unlock(current->mm);
				break; /* call do_page_fault() */
			}
			mmap_read_unlock(current->mm);
		}
		/* CPU could not fetch instruction, so clear stale IIR value. */
		regs->iir = 0xbaadf00d;
		fallthrough;
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(code == 7)?
				((void __user *) regs->iaoq[0]) :
				((void __user *) regs->ior));
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			force_sig_fault(SIGBUS, BUS_OBJERR,
					(void __user *)regs->ior);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
		parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(void __user *)regs->ior);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space,
	     * unless pagefault_disable() was called before.
	     */

	    if (faulthandler_disabled() || fault_space == 0)
	    {
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))
			return;
		/* Clean up and return if handled by kfence. */
		if (kfence_handle_page_fault(fault_address,
			parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
			return;
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
	    }
	}

	do_page_fault(regs, code, fault_address);
}


static void __init initialize_ivt(const void *iva)
{
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 instr;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/*
	 * Rules for the checksum of the HPMC handler:
	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
	 *    its own IVA).
	 * 2. The word at IVA + 32 is nonzero.
	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
	 *    Address (IVA + 56) are word-aligned.
	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
	 *    the Length/4 words starting at Address is zero.
	 */

	/* Setup IVA and compute checksum for HPMC handler */
	ivap[6] = (u32)__pa(os_hpmc);

	for (i=0; i<8; i++)
	    check += ivap[i];

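	/* Word 5 is the HPMC checksum slot: storing the negated sum here
	 * makes the eight words at IVA + 32 checksum to zero (rule 4 above). */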
	ivap[5] = -check;
	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void  __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}