xref: /linux/arch/parisc/kernel/traps.c (revision 8cc8ea228c4199482cf087fc6ed2d6e31b7a49e2)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/arch/parisc/traps.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
7  */
8 
9 /*
10  * 'Traps.c' handles hardware traps and faults after we have saved some
11  * state in 'asm.s'.
12  */
13 
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/ptrace.h>
20 #include <linux/timer.h>
21 #include <linux/delay.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/console.h>
29 #include <linux/bug.h>
30 #include <linux/ratelimit.h>
31 #include <linux/uaccess.h>
32 #include <linux/kdebug.h>
33 #include <linux/kfence.h>
34 #include <linux/perf_event.h>
35 
36 #include <asm/assembly.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 #include <asm/traps.h>
40 #include <linux/unaligned.h>
41 #include <linux/atomic.h>
42 #include <asm/smp.h>
43 #include <asm/pdc.h>
44 #include <asm/pdc_chassis.h>
45 #include <asm/unwind.h>
46 #include <asm/tlbflush.h>
47 #include <asm/cacheflush.h>
48 #include <linux/kgdb.h>
49 #include <linux/kprobes.h>
50 
51 #include "unaligned.h"
52 
53 #if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
54 #include <asm/spinlock.h>
55 #endif
56 
57 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
58 
59 static void parisc_show_stack(struct task_struct *task,
60 	struct pt_regs *regs, const char *loglvl);
61 
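/* Render the low 'nbits' bits of x, most significant first, as '0'/'1' characters into buf (NUL-terminated). */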
62 static int printbinary(char *buf, unsigned long x, int nbits)
63 {
64 	unsigned long mask = 1UL << (nbits - 1);
65 	while (mask != 0) {
66 		*buf++ = (mask & x ? '1' : '0');
67 		mask >>= 1;
68 	}
69 	*buf = '\0';
70 
71 	return nbits;
72 }
73 
74 #ifdef CONFIG_64BIT
75 #define RFMT "%016lx"
76 #else
77 #define RFMT "%08lx"
78 #endif
79 #define FFMT "%016llx"	/* fpregs are 64-bit always */
80 
81 #define PRINTREGS(lvl,r,f,fmt,x)	\
82 	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
83 		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
84 		(r)[(x)+2], (r)[(x)+3])
85 
86 static void print_gr(const char *level, struct pt_regs *regs)
87 {
88 	int i;
89 	char buf[64];
90 
91 	printk("%s\n", level);
92 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
93 	printbinary(buf, regs->gr[0], 32);
94 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
95 
96 	for (i = 0; i < 32; i += 4)
97 		PRINTREGS(level, regs->gr, "r", RFMT, i);
98 }
99 
100 static void print_fr(const char *level, struct pt_regs *regs)
101 {
102 	int i;
103 	char buf[64];
104 	struct { u32 sw[2]; } s;
105 
106 	/* FR are 64bit everywhere. Need to use asm to get the content
107 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
108 	 * in our way, otherwise we're screwed.
109 	 * The fldd is used to restore the T-bit if there was one, as the
110 	 * store clears it anyway.
111 	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
112 	asm volatile ("fstd %%fr0,0(%1)	\n\t"
113 		      "fldd 0(%1),%%fr0	\n\t"
114 		      : "=m" (s) : "r" (&s) : "r0");
115 
116 	printk("%s\n", level);
117 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
118 	printbinary(buf, s.sw[0], 32);
119 	printk("%sFPSR: %s\n", level, buf);
120 	printk("%sFPER1: %08x\n", level, s.sw[1]);
121 
122 	/* here we'll print fr0 again, tho it'll be meaningless */
123 	for (i = 0; i < 32; i += 4)
124 		PRINTREGS(level, regs->fr, "fr", FFMT, i);
125 }
126 
127 void show_regs(struct pt_regs *regs)
128 {
129 	int i, user;
130 	const char *level;
131 	unsigned long cr30, cr31;
132 
133 	user = user_mode(regs);
134 	level = user ? KERN_DEBUG : KERN_CRIT;
135 
136 	show_regs_print_info(level);
137 
138 	print_gr(level, regs);
139 
140 	for (i = 0; i < 8; i += 4)
141 		PRINTREGS(level, regs->sr, "sr", RFMT, i);
142 
143 	if (user)
144 		print_fr(level, regs);
145 
146 	cr30 = mfctl(30);
147 	cr31 = mfctl(31);
148 	printk("%s\n", level);
149 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
150 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
151 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
152 	       level, regs->iir, regs->isr, regs->ior);
153 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
154 	       level, task_cpu(current), cr30, cr31);
155 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
156 
157 	if (user) {
158 		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
159 		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
160 		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
161 	} else {
162 		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
163 		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
164 		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
165 
166 		parisc_show_stack(current, regs, KERN_DEFAULT);
167 	}
168 }
169 
170 static DEFINE_RATELIMIT_STATE(_hppa_rs,
171 	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
172 
173 #define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
174 	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
175 		printk(fmt, ##__VA_ARGS__);				      \
176 		show_regs(regs);					      \
177 	}								      \
178 }
179 
180 
181 static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
182 {
183 	int i = 1;
184 
185 	printk("%sBacktrace:\n", loglvl);
186 	while (i <= MAX_UNWIND_ENTRIES) {
187 		if (unwind_once(info) < 0 || info->ip == 0)
188 			break;
189 
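		/* Only print (and count) frames whose IP lies in kernel text. */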
190 		if (__kernel_text_address(info->ip)) {
191 			printk("%s [<" RFMT ">] %pS\n",
192 				loglvl, info->ip, (void *) info->ip);
193 			i++;
194 		}
195 	}
196 	printk("%s\n", loglvl);
197 }
198 
199 static void parisc_show_stack(struct task_struct *task,
200 	struct pt_regs *regs, const char *loglvl)
201 {
202 	struct unwind_frame_info info;
203 
204 	unwind_frame_init_task(&info, task, regs);
205 
206 	do_show_stack(&info, loglvl);
207 }
208 
209 void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
210 {
211 	parisc_show_stack(t, NULL, loglvl);
212 }
213 
214 int is_valid_bugaddr(unsigned long iaoq)
215 {
216 	return 1;
217 }
218 
219 void die_if_kernel(char *str, struct pt_regs *regs, long err)
220 {
221 	if (user_mode(regs)) {
222 		if (err == 0)
223 			return; /* STFU */
224 
225 		parisc_printk_ratelimited(1, regs,
226 			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
227 			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
228 
229 		return;
230 	}
231 
232 	bust_spinlocks(1);
233 
234 	oops_enter();
235 
236 	/* Amuse the user in a SPARC fashion */
237 	if (err) printk(KERN_CRIT
238 			"      _______________________________ \n"
239 			"     < Your System ate a SPARC! Gah! >\n"
240 			"      ------------------------------- \n"
241 			"             \\   ^__^\n"
242 			"                 (__)\\       )\\/\\\n"
243 			"                  U  ||----w |\n"
244 			"                     ||     ||\n");
245 
246 	/* unlock the pdc lock if necessary */
247 	pdc_emergency_unlock();
248 
249 	if (err)
250 		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
251 			current->comm, task_pid_nr(current), str, err);
252 
253 	/* Wot's wrong wif bein' racy? */
254 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
255 		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
256 		local_irq_enable();
257 		while (1);
258 	}
259 	current->thread.flags |= PARISC_KERNEL_DEATH;
260 
261 	show_regs(regs);
262 	dump_stack();
263 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
264 
265 	if (in_interrupt())
266 		panic("Fatal exception in interrupt");
267 
268 	if (panic_on_oops)
269 		panic("Fatal exception");
270 
271 	oops_exit();
272 	make_task_dead(SIGSEGV);
273 }
274 
275 /* gdb uses break 4,8 */
276 #define GDB_BREAK_INSN 0x10004
277 static void handle_gdb_break(struct pt_regs *regs, int wot)
278 {
279 	force_sig_fault(SIGTRAP, wot,
280 			(void __user *) (regs->iaoq[0] & ~3));
281 }
282 
283 static void handle_break(struct pt_regs *regs)
284 {
285 	unsigned iir = regs->iir;
286 
287 	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
288 		/* check if a BUG() or WARN() trapped here.  */
289 		enum bug_trap_type tt;
290 		tt = report_bug(regs->iaoq[0] & ~3, regs);
291 		if (tt == BUG_TRAP_TYPE_WARN) {
292 			regs->iaoq[0] += 4;
293 			regs->iaoq[1] += 4;
294 			return; /* return to next instruction when WARN_ON().  */
295 		}
296 		die_if_kernel("Unknown kernel breakpoint", regs,
297 			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
298 	}
299 
300 #ifdef CONFIG_KPROBES
301 	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
302 		parisc_kprobe_break_handler(regs);
303 		return;
304 	}
305 	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
306 		parisc_kprobe_ss_handler(regs);
307 		return;
308 	}
309 #endif
310 
311 #ifdef CONFIG_KGDB
312 	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
313 		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
314 		kgdb_handle_exception(9, SIGTRAP, 0, regs);
315 		return;
316 	}
317 #endif
318 
319 #ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
320         if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
321 		die_if_kernel("Spinlock was trashed", regs, 1);
322 	}
323 #endif
324 
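	/* For BREAK codes other than gdb's "break 4,8", log the im5 (low 5
	 * bits of the IIR) and im13 (bits 13..25) operands of the insn. */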
325 	if (unlikely(iir != GDB_BREAK_INSN))
326 		parisc_printk_ratelimited(0, regs,
327 			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
328 			iir & 31, (iir>>13) & ((1<<13)-1),
329 			task_pid_nr(current), current->comm);
330 
331 	/* send standard GDB signal */
332 	handle_gdb_break(regs, TRAP_BRKPT);
333 }
334 
335 static void default_trap(int code, struct pt_regs *regs)
336 {
337 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
338 	show_regs(regs);
339 }
340 
341 static void transfer_pim_to_trap_frame(struct pt_regs *regs)
342 {
343     register int i;
344     extern unsigned int hpmc_pim_data[];
345     struct pdc_hpmc_pim_11 *pim_narrow;
346     struct pdc_hpmc_pim_20 *pim_wide;
347 
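    /* PIM layout depends on the CPU: PCX-U and newer use the wide (PA 2.0)
     * format, older CPUs the narrow (PA 1.1) one. */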
348     if (boot_cpu_data.cpu_type >= pcxu) {
349 
350 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
351 
352 	/*
353 	 * Note: The following code will probably generate a
354 	 * bunch of truncation error warnings from the compiler.
355 	 * Could be handled with an ifdef, but perhaps there
356 	 * is a better way.
357 	 */
358 
359 	regs->gr[0] = pim_wide->cr[22];
360 
361 	for (i = 1; i < 32; i++)
362 	    regs->gr[i] = pim_wide->gr[i];
363 
364 	for (i = 0; i < 32; i++)
365 	    regs->fr[i] = pim_wide->fr[i];
366 
367 	for (i = 0; i < 8; i++)
368 	    regs->sr[i] = pim_wide->sr[i];
369 
370 	regs->iasq[0] = pim_wide->cr[17];
371 	regs->iasq[1] = pim_wide->iasq_back;
372 	regs->iaoq[0] = pim_wide->cr[18];
373 	regs->iaoq[1] = pim_wide->iaoq_back;
374 
375 	regs->sar  = pim_wide->cr[11];
376 	regs->iir  = pim_wide->cr[19];
377 	regs->isr  = pim_wide->cr[20];
378 	regs->ior  = pim_wide->cr[21];
379     }
380     else {
381 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
382 
383 	regs->gr[0] = pim_narrow->cr[22];
384 
385 	for (i = 1; i < 32; i++)
386 	    regs->gr[i] = pim_narrow->gr[i];
387 
388 	for (i = 0; i < 32; i++)
389 	    regs->fr[i] = pim_narrow->fr[i];
390 
391 	for (i = 0; i < 8; i++)
392 	    regs->sr[i] = pim_narrow->sr[i];
393 
394 	regs->iasq[0] = pim_narrow->cr[17];
395 	regs->iasq[1] = pim_narrow->iasq_back;
396 	regs->iaoq[0] = pim_narrow->cr[18];
397 	regs->iaoq[1] = pim_narrow->iaoq_back;
398 
399 	regs->sar  = pim_narrow->cr[11];
400 	regs->iir  = pim_narrow->cr[19];
401 	regs->isr  = pim_narrow->cr[20];
402 	regs->ior  = pim_narrow->cr[21];
403     }
404 
405     /*
406      * The following fields only have meaning if we came through
407      * another path. So just zero them here.
408      */
409 
410     regs->ksp = 0;
411     regs->kpc = 0;
412     regs->orig_r28 = 0;
413 }
414 
415 
416 /*
417  * This routine is called as a last resort when everything else
418  * has gone clearly wrong. We get called for faults in kernel space,
419  * and HPMC's.
420  */
421 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
422 {
423 	static DEFINE_SPINLOCK(terminate_lock);
424 
425 	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
426 	bust_spinlocks(1);
427 
428 	set_eiem(0);
429 	local_irq_disable();
430 	spin_lock(&terminate_lock);
431 
432 	/* unlock the pdc lock if necessary */
433 	pdc_emergency_unlock();
434 
435 	/* Not all paths will gutter the processor... */
436 	switch(code){
437 
438 	case 1:
439 		transfer_pim_to_trap_frame(regs);
440 		break;
441 
442 	default:
443 		break;
444 
445 	}
446 
447 	{
448 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
449 		struct unwind_frame_info info;
450 		unwind_frame_init(&info, current, regs);
451 		do_show_stack(&info, KERN_CRIT);
452 	}
453 
454 	printk("\n");
455 	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
456 		msg, code, trap_name(code), offset);
457 	show_regs(regs);
458 
459 	spin_unlock(&terminate_lock);
460 
461 	/* put soft power button back under hardware control;
462 	 * if the user had pressed it once at any time, the
463 	 * system will shut down immediately right here. */
464 	pdc_soft_power_button(0);
465 
466 	/* Call kernel panic() so reboot timeouts work properly
467 	 * FIXME: This function should be on the list of
468 	 * panic notifiers, and we should call panic
469 	 * directly from the location that we wish.
470 	 * e.g. We should not call panic from
471 	 * parisc_terminate, but rather the other way around.
472 	 * This hack works, prints the panic message twice,
473 	 * and it enables reboot timers!
474 	 */
475 	panic(msg);
476 }
477 
478 void notrace handle_interruption(int code, struct pt_regs *regs)
479 {
480 	unsigned long fault_address = 0;
481 	unsigned long fault_space = 0;
482 	int si_code;
483 
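	/* gr[0] holds the interrupted context's PSW; re-enable interrupts
	 * only if they were enabled there. */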
484 	if (!irqs_disabled_flags(regs->gr[0]))
485 	    local_irq_enable();
486 
487 	/* Security check:
488 	 * If the priority level is still user, and the
489 	 * faulting space is not equal to the active space
490 	 * then the user is attempting something in a space
491 	 * that does not belong to them. Kill the process.
492 	 *
493 	 * This is normally the situation when the user
494 	 * attempts to jump into the kernel space at the
495 	 * wrong offset, be it at the gateway page or a
496 	 * random location.
497 	 *
498 	 * We cannot normally signal the process because it
499 	 * could *be* on the gateway page, and processes
500 	 * executing on the gateway page can't have signals
501 	 * delivered.
502 	 *
503 	 * We merely readjust the address into the user's
504 	 * space, at a destination address of zero, and
505 	 * allow processing to continue.
506 	 */
507 	if (((unsigned long)regs->iaoq[0] & 3) &&
508 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
509 		/* Kill the user process later */
510 		regs->iaoq[0] = 0 | PRIV_USER;
511 		regs->iaoq[1] = regs->iaoq[0] + 4;
512 		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
513 		regs->gr[0] &= ~PSW_B;
514 		return;
515 	}
516 
517 #if 0
518 	printk(KERN_CRIT "Interruption # %d\n", code);
519 #endif
520 
521 	switch(code) {
522 
523 	case  1:
524 		/* High-priority machine check (HPMC) */
525 
526 		/* set up a new led state on systems shipped with a LED State panel */
527 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
528 
529 		parisc_terminate("High Priority Machine Check (HPMC)",
530 				regs, code, 0);
531 		/* NOT REACHED */
532 
533 	case  2:
534 		/* Power failure interrupt */
535 		printk(KERN_CRIT "Power failure interrupt !\n");
536 		return;
537 
538 	case  3:
539 		/* Recovery counter trap */
540 		regs->gr[0] &= ~PSW_R;
541 
542 #ifdef CONFIG_KGDB
543 		if (kgdb_single_step) {
544 			kgdb_handle_exception(0, SIGTRAP, 0, regs);
545 			return;
546 		}
547 #endif
548 
549 		if (user_space(regs))
550 			handle_gdb_break(regs, TRAP_TRACE);
551 		/* else this must be the start of a syscall - just let it run */
552 		return;
553 
554 	case  5:
555 		/* Low-priority machine check */
556 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
557 
558 		flush_cache_all();
559 		flush_tlb_all();
560 		default_trap(code, regs);
561 		return;
562 
563 	case  PARISC_ITLB_TRAP:
564 		/* Instruction TLB miss fault/Instruction page fault */
565 		fault_address = regs->iaoq[0];
566 		fault_space   = regs->iasq[0];
567 		break;
568 
569 	case  8:
570 		/* Illegal instruction trap */
571 		die_if_kernel("Illegal instruction", regs, code);
572 		si_code = ILL_ILLOPC;
573 		goto give_sigill;
574 
575 	case  9:
576 		/* Break instruction trap */
577 		handle_break(regs);
578 		return;
579 
580 	case 10:
581 		/* Privileged operation trap */
582 		die_if_kernel("Privileged operation", regs, code);
583 		si_code = ILL_PRVOPC;
584 		goto give_sigill;
585 
586 	case 11:
587 		/* Privileged register trap */
588 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
589 
590 			/* This is a MFCTL cr26/cr27 to gr instruction.
591 			 * PCXS traps on this, so we need to emulate it.
592 			 */
593 
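			/* Bit 0x00200000 of the insn selects cr27, otherwise cr26;
			 * the low 5 bits give the target GR. */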
594 			if (regs->iir & 0x00200000)
595 				regs->gr[regs->iir & 0x1f] = mfctl(27);
596 			else
597 				regs->gr[regs->iir & 0x1f] = mfctl(26);
598 
599 			regs->iaoq[0] = regs->iaoq[1];
600 			regs->iaoq[1] += 4;
601 			regs->iasq[0] = regs->iasq[1];
602 			return;
603 		}
604 
605 		die_if_kernel("Privileged register usage", regs, code);
606 		si_code = ILL_PRVREG;
607 	give_sigill:
608 		force_sig_fault(SIGILL, si_code,
609 				(void __user *) regs->iaoq[0]);
610 		return;
611 
612 	case 12:
613 		/* Overflow Trap, let the userland signal handler do the cleanup */
614 		force_sig_fault(SIGFPE, FPE_INTOVF,
615 				(void __user *) regs->iaoq[0]);
616 		return;
617 
618 	case 13:
619 		/* Conditional Trap
620 		   The condition succeeds in an instruction which traps
621 		   on condition  */
622 		if(user_mode(regs)){
623 			/* Let userspace app figure it out from the insn pointed
624 			 * to by si_addr.
625 			 */
626 			force_sig_fault(SIGFPE, FPE_CONDTRAP,
627 					(void __user *) regs->iaoq[0]);
628 			return;
629 		}
630 		/* The kernel doesn't want to handle condition codes */
631 		break;
632 
633 	case 14:
634 		/* Assist Exception Trap, i.e. floating point exception. */
635 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
636 		__inc_irq_stat(irq_fpassist_count);
637 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
638 		handle_fpe(regs);
639 		return;
640 
641 	case 15:
642 		/* Data TLB miss fault/Data page fault */
643 		fallthrough;
644 	case 16:
645 		/* Non-access instruction TLB miss fault */
646 		/* The instruction TLB entry needed for the target address of the FIC
647 		   is absent, and hardware can't find it, so we get to cleanup */
648 		fallthrough;
649 	case 17:
650 		/* Non-access data TLB miss fault/Non-access data page fault */
651 		/* FIXME:
652 			 Still need to add slow path emulation code here!
653 			 If the insn used a non-shadow register, then the tlb
654 			 handlers could not have their side-effect (e.g. probe
655 			 writing to a target register) emulated since rfir would
656 			 erase the changes to said register. Instead we have to
657 			 setup everything, call this function we are in, and emulate
658 			 by hand. Technically we need to emulate:
659 			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
660 		*/
661 		if (code == 17 && handle_nadtlb_fault(regs))
662 			return;
663 		fault_address = regs->ior;
664 		fault_space = regs->isr;
665 		break;
666 
667 	case 18:
668 		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
669 		/* Check for unaligned access */
670 		if (check_unaligned(regs)) {
671 			handle_unaligned(regs);
672 			return;
673 		}
674 		fallthrough;
675 	case 26:
676 		/* PCXL: Data memory access rights trap */
677 		fault_address = regs->ior;
678 		fault_space   = regs->isr;
679 		break;
680 
681 	case 19:
682 		/* Data memory break trap */
683 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
684 		fallthrough;
685 	case 21:
686 		/* Page reference trap */
687 		handle_gdb_break(regs, TRAP_HWBKPT);
688 		return;
689 
690 	case 25:
691 		/* Taken branch trap */
692 		regs->gr[0] &= ~PSW_T;
693 		if (user_space(regs))
694 			handle_gdb_break(regs, TRAP_BRANCH);
695 		/* else this must be the start of a syscall - just let it
696 		 * run.
697 		 */
698 		return;
699 
700 	case  7:
701 		/* Instruction access rights */
702 		/* PCXL: Instruction memory protection trap */
703 
704 		/*
705 		 * This could be caused by either: 1) a process attempting
706 		 * to execute within a vma that does not have execute
707 		 * permission, or 2) an access rights violation caused by a
708 		 * flush only translation set up by ptep_get_and_clear().
709 		 * So we check the vma permissions to differentiate the two.
710 		 * If the vma indicates we have execute permission, then
711 		 * the cause is the latter one. In this case, we need to
712 		 * call do_page_fault() to fix the problem.
713 		 */
714 
715 		if (user_mode(regs)) {
716 			struct vm_area_struct *vma;
717 
718 			mmap_read_lock(current->mm);
719 			vma = find_vma(current->mm,regs->iaoq[0]);
720 			if (vma && (regs->iaoq[0] >= vma->vm_start)
721 				&& (vma->vm_flags & VM_EXEC)) {
722 
723 				fault_address = regs->iaoq[0];
724 				fault_space = regs->iasq[0];
725 
726 				mmap_read_unlock(current->mm);
727 				break; /* call do_page_fault() */
728 			}
729 			mmap_read_unlock(current->mm);
730 		}
731 		/* CPU could not fetch instruction, so clear stale IIR value. */
732 		regs->iir = 0xbaadf00d;
733 		fallthrough;
734 	case 27:
735 		/* Data memory protection ID trap */
736 		if (code == 27 && !user_mode(regs) &&
737 			fixup_exception(regs))
738 			return;
739 
740 		die_if_kernel("Protection id trap", regs, code);
741 		force_sig_fault(SIGSEGV, SEGV_MAPERR,
742 				(code == 7)?
743 				((void __user *) regs->iaoq[0]) :
744 				((void __user *) regs->ior));
745 		return;
746 
747 	case 28:
748 		/* Unaligned data reference trap */
749 		handle_unaligned(regs);
750 		return;
751 
752 	default:
753 		if (user_mode(regs)) {
754 			parisc_printk_ratelimited(0, regs, KERN_DEBUG
755 				"handle_interruption() pid=%d command='%s'\n",
756 				task_pid_nr(current), current->comm);
757 			/* SIGBUS, for lack of a better one. */
758 			force_sig_fault(SIGBUS, BUS_OBJERR,
759 					(void __user *)regs->ior);
760 			return;
761 		}
762 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
763 
764 		parisc_terminate("Unexpected interruption", regs, code, 0);
765 		/* NOT REACHED */
766 	}
767 
768 	if (user_mode(regs)) {
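	    /* A fault in a space id other than the process's own cannot be
	     * fixed up by do_page_fault(); deliver SIGSEGV instead. */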
769 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
770 		parisc_printk_ratelimited(0, regs, KERN_DEBUG
771 				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
772 				code, fault_space,
773 				task_pid_nr(current), current->comm);
774 		force_sig_fault(SIGSEGV, SEGV_MAPERR,
775 				(void __user *)regs->ior);
776 		return;
777 	    }
778 	}
779 	else {
780 
781 	    /*
782 	     * The kernel should never fault on its own address space,
783 	     * unless pagefault_disable() was called before.
784 	     */
785 
786 	    if (faulthandler_disabled() || fault_space == 0)
787 	    {
788 		/* Clean up and return if in exception table. */
789 		if (fixup_exception(regs))
790 			return;
791 		/* Clean up and return if handled by kfence. */
792 		if (kfence_handle_page_fault(fault_address,
793 			parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
794 			return;
795 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
796 		parisc_terminate("Kernel Fault", regs, code, fault_address);
797 	    }
798 	}
799 
800 	do_page_fault(regs, code, fault_address);
801 }
802 
803 
804 static void __init initialize_ivt(const void *iva)
805 {
806 	extern const u32 os_hpmc[];
807 
808 	int i;
809 	u32 check = 0;
810 	u32 *ivap;
811 	u32 instr;
812 
813 	if (strcmp((const char *)iva, "cows can fly"))
814 		panic("IVT invalid");
815 
816 	ivap = (u32 *)iva;
817 
818 	for (i = 0; i < 8; i++)
819 	    *ivap++ = 0;
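	/* The zeroing loop advanced ivap by eight words, so it now points at
	 * the HPMC vector slot at IVA + 32. */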
820 
821 	/*
822 	 * Use PDC_INSTR firmware function to get instruction that invokes
823 	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
824 	 * the PA 1.1 Firmware Architecture document.
825 	 */
826 	if (pdc_instr(&instr) == PDC_OK)
827 		ivap[0] = instr;
828 
829 	/*
830 	 * Rules for the checksum of the HPMC handler:
831 	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
832 	 *    its own IVA).
833 	 * 2. The word at IVA + 32 is nonzero.
834 	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
835 	 *    Address (IVA + 56) are word-aligned.
836 	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
837 	 *    the Length/4 words starting at Address is zero.
838 	 */
839 
840 	/* Setup IVA and compute checksum for HPMC handler */
841 	ivap[6] = (u32)__pa(os_hpmc);
842 
843 	for (i=0; i<8; i++)
844 	    check += ivap[i];
845 
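	/* ivap[5] (IVA + 52) was still zero in the sum above, so storing
	 * -check makes the eight words starting at IVA + 32 sum to zero. */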
846 	ivap[5] = -check;
847 	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
848 }
849 
850 
851 /* early_trap_init() is called before we set up kernel mappings and
852  * write-protect the kernel */
853 void  __init early_trap_init(void)
854 {
855 	extern const void fault_vector_20;
856 
857 #ifndef CONFIG_64BIT
858 	extern const void fault_vector_11;
859 	initialize_ivt(&fault_vector_11);
860 #endif
861 
862 	initialize_ivt(&fault_vector_20);
863 }
864