xref: /linux/arch/loongarch/kernel/traps.c (revision 52990390f91c1c39ca742fc8f390b29891d95127)
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>

#include "access-helper.h"

extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_bce(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);

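/*
 * Walk the frames with the arch unwinder and print one return
 * address per line via print_ip_sym().
 */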
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

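/*
 * Dump raw stack words upwards from $sp (regs[3]) until the end of
 * the containing page, capped at 40 words, then print the backtrace.
 */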
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

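/*
 * Build a synthetic pt_regs for the backtrace: take the live frame
 * for current (prepare_frametrace), or the switch-time ra/sp/fp
 * saved in thread_struct for a sleeping task.
 */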
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

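/*
 * Dump the instruction words around ERA: three before, five after,
 * with the faulting word wrapped in angle brackets.
 */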
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

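/*
 * Memory access types encoded in CRMD.DACF/DACM; per the ISA manual
 * these are strongly-ordered uncached (SUC), coherent cached (CC)
 * and weakly-ordered uncached (WUC), all other values are reserved.
 */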
static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

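/*
 * Illustrative output of the fragment printers below, for a typical
 * kernel-mode CRMD value of 0xb0:
 *
 *   " CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)"
 */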
static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

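/*
 * Serialized by die_lock: dump the wreckage, taint the kernel, then
 * either panic (in interrupt context, or if panic_on_oops is set) or
 * kill the current task.
 */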
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(sig);
}

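/*
 * Program the vector spacing into ECFG.VS. Per the ISA manual, the
 * entries are spaced 2^VS instructions (4 * 2^VS bytes) apart, hence
 * vs = ilog2(size / 4); VS == 0 (one shared entry) is rejected here.
 */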
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size / 4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

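/*
 * Turn an FP-emulator verdict into a signal: returns 0 if there was
 * no signal to send, 1 once the signal has been queued.
 */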
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

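/*
 * With CONFIG_ARCH_STRICT_ALIGN the hardware cannot perform unaligned
 * accesses, so the kernel tries to emulate the load/store; otherwise
 * unaligned accesses are expected to be handled in hardware and any
 * ALE that still arrives is treated as a plain bus error.
 */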
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
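/*
 * Accept every address: report_bug() looks the address up in the
 * bug table anyway before acting on it.
 */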
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

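/*
 * Bounds check exception: decode the trapping asrt{le,gt} or
 * bounds-checked load/store at ERA to recover the offending address
 * and the violated bound, then report it via force_sig_bnderr().
 */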
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

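/*
 * Breakpoint exception: the break code is the low 15 bits of the
 * trapping instruction. Kprobe/uprobe codes are dispatched first;
 * the rest map to BUG handling, integer division/overflow faults or
 * a plain SIGTRAP.
 */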
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();
	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kprobe handlers first, if the instruction is likely
	 * to pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

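/*
 * Watch exception: shared between ptrace single-stepping, which is
 * built on instruction-fetch watchpoints (CSR.FWPS), and ordinary
 * hardware breakpoints/watchpoints.
 */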
asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * An ll-sc combo is regarded as a single instruction, so
			 * don't clear llbit or reset CSR.FWPS.Skip until the ll-sc
			 * sequence has completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions, such as fld.d/fst.d, are occasionally
			 * not skipped even when CSR.FWPS.Skip is set. So single-stepping
			 * has to compare csr_era against the value recorded when
			 * single_step was last set.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given instruction
				 * equals the current pc; if so, we must not set the
				 * CSR.FWPS.Skip bit, as that would break the original
				 * instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

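/*
 * Reserved instruction: both the INE and IPE vectors land here (see
 * trap_init()) and result in SIGILL, or SIGSEGV if the opcode cannot
 * even be read back.
 */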
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int opcode = 0;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

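/*
 * The SIMD (LSX/LASX) and binary-translation (LBT) "disabled"
 * exceptions are not handled beyond delivering SIGILL here.
 */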
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

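/*
 * Vectored interrupt entry: if not already on this CPU's IRQ stack,
 * switch to it by hand (stashing the task sp at its top for the
 * unwinder) before calling handle_loongarch_irq().
 */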
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

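/*
 * A single 64KB-aligned block holds all vectors: the same base is
 * programmed into both the normal (EENTRY) and machine error
 * (MERRENTRY) entry registers, with the TLB refill entry placed
 * 80 * VECSIZE into the block.
 */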
static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}