xref: /linux/arch/riscv/kernel/traps.c (revision ff19a8dee196d757dbc32a946843260f0b784ca3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

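/*
 * Dump the code around a faulting kernel PC: the ten 16-bit parcels before
 * it and the two starting at it, with the parcel at the PC itself shown in
 * parentheses.  Bails out if any of them cannot be read.
 */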
static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = get_kernel_nofault(val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

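/*
 * Report a fatal kernel-mode exception: serialize oops output across harts,
 * dump the registers and offending code, honour the kexec/panic policy and
 * finally take the current task down with SIGSEGV.
 */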
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_kernel_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

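/*
 * Deliver a fault signal to the current (user) task, optionally logging a
 * rate-limited "unhandled signal" report when the task has no handler
 * installed for the signal.
 */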
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

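/*
 * Common trap tail: faults taken from user mode become signals via
 * do_trap(), while kernel-mode faults either hit an exception table fixup
 * or end in die().
 */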
static void do_trap_error(struct pt_regs *regs, int signo, int code,
	unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

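/*
 * On XIP kernels with alternatives enabled the trap handlers are placed in
 * the dedicated ".xip.traps" section; otherwise __trap_section expands to
 * nothing.  DO_ERROR_INFO() stamps out a trivial handler that forwards to
 * do_trap_error() with a fixed signal, si_code and message.
 */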
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section		__section(".xip.traps")
#else
#define __trap_section
#endif
#define DO_ERROR_INFO(name, signo, code, str)				\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)	\
{									\
	do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
	SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
#else
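/*
 * When the kernel itself runs in M-mode there is no firmware underneath to
 * emulate misaligned accesses, so try to handle them in software and only
 * fall back to the usual error path if emulation fails.
 */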
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_load(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - load address misaligned");
}

asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_store(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - store (or AMO) address misaligned");
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
	SIGILL, ILL_ILLTRP, "environment call from U-mode");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

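/*
 * Return the length in bytes of the (possibly compressed) instruction at
 * @pc, or 0 if it cannot be read.
 */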
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

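/*
 * Breakpoint exceptions are shared by several users: kprobes and uprobes
 * get first shot, a user-space ebreak becomes SIGTRAP, KGDB may claim the
 * trap, and WARN()/BUG() style ebreaks either skip the trapping instruction
 * or end in die().
 */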
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	if (kprobe_single_step_handler(regs))
		return;

	if (kprobe_breakpoint_handler(regs))
		return;
#endif
#ifdef CONFIG_UPROBES
	if (uprobe_single_step_handler(regs))
		return;

	if (uprobe_breakpoint_handler(regs))
		return;
#endif
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}
NOKPROBE_SYMBOL(do_trap_break);

#ifdef CONFIG_GENERIC_BUG
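/*
 * Tell the generic bug handling code whether @pc looks like a BUG()/WARN()
 * trap site: it must not point below VMALLOC_START and the instruction
 * there must be the 32-bit ebreak or the compressed c.ebreak encoding.
 */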
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/*
 * Extra stack space that allows us to provide panic messages when the kernel
 * has overflowed its stack.
 */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);
/*
 * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
 * we can call into C code to get the per-hart overflow stack.  Usage of this
 * stack must be protected by spin_shadow_stack.
 */
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);

/*
 * A pseudo spinlock to protect the shadow stack from being used by multiple
 * harts concurrently.  This isn't a real spinlock because the lock side must
 * be taken without a valid stack and with only a single register available;
 * it's only taken while in the process of panicking anyway, so the
 * performance and error checking a proper spinlock would give us don't
 * matter.
 */
unsigned long spin_shadow_stack;

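/*
 * Called from the assembly stack-overflow path (while running on the shadow
 * stack) to find the top of this hart's overflow stack; stacks grow down,
 * so return the end of the per-CPU array.
 */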
asmlinkage unsigned long get_overflow_stack(void)
{
	return (unsigned long)this_cpu_ptr(overflow_stack) +
		OVERFLOW_STACK_SIZE;
}

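/*
 * Entered on the overflow stack once the low-level entry code has detected
 * a kernel stack overflow: release the shadow-stack pseudo-lock, report the
 * overflow and panic.
 */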
262 
263 asmlinkage void handle_bad_stack(struct pt_regs *regs)
264 {
265 	unsigned long tsk_stk = (unsigned long)current->stack;
266 	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
267 
268 	/*
269 	 * We're done with the shadow stack by this point, as we're on the
270 	 * overflow stack.  Tell any other concurrent overflowing harts that
271 	 * they can proceed with panicing by releasing the pseudo-spinlock.
272 	 *
273 	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
274 	 */
275 	smp_store_release(&spin_shadow_stack, 0);
276 
277 	console_verbose();
278 
279 	pr_emerg("Insufficient stack space to handle exception!\n");
280 	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
281 			tsk_stk, tsk_stk + THREAD_SIZE);
282 	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
283 			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
284 
285 	__show_regs(regs);
286 	panic("Kernel stack overflow");
287 
288 	for (;;)
289 		wait_for_interrupt();
290 }
291 #endif
292