xref: /linux/arch/riscv/kernel/traps.c (revision 3013c33dcbd9b3107eef8facce0e4c69f3b7f780)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

static DEFINE_RAW_SPINLOCK(die_lock);

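/*
 * Fetch one 16-bit instruction parcel at @insns without faulting: kernel
 * memory for kernel-mode traps, current's user memory otherwise.
 */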
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}

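/*
 * Dump the code around the faulting program counter, marking the parcel at
 * the trap location with parentheses.
 */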
static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

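/*
 * Report a fatal kernel exception: dump registers, code and modules, notify
 * the die chain, then kexec, taint and kill the task or panic as configured.
 */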
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	raw_spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

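/*
 * Deliver a fault signal for a user-mode trap, with an optional rate-limited
 * diagnostic when the task has no handler installed for the signal.
 */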
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

static void do_trap_error(struct pt_regs *regs, int signo, int code,
	unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
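/*
 * Generate a trap handler that reports "Oops - " @str with @signo/@code,
 * wrapped in the generic entry/exit code for user-mode or NMI-like context.
 */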
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");

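/* Handlers and report strings for misaligned load and store traps. */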
enum misaligned_access_type {
	MISALIGNED_STORE,
	MISALIGNED_LOAD,
};
static const struct {
	const char *type_str;
	int (*handler)(struct pt_regs *regs);
} misaligned_handler[] = {
	[MISALIGNED_STORE] = {
		.type_str = "Oops - store (or AMO) address misaligned",
		.handler = handle_misaligned_store,
	},
	[MISALIGNED_LOAD] = {
		.type_str = "Oops - load address misaligned",
		.handler = handle_misaligned_load,
	},
};

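/*
 * Misaligned access trap: run the type-specific handler and raise
 * SIGBUS/BUS_ADRALN if it cannot handle the access.
 */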
static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
{
	irqentry_state_t state;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		local_irq_enable();
	} else {
		state = irqentry_nmi_enter(regs);
	}

	if (misaligned_handler[type].handler(regs))
		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      misaligned_handler[type].type_str);

	if (user_mode(regs)) {
		local_irq_disable();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_LOAD);
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	do_trap_misaligned(regs, MISALIGNED_STORE);
}

DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

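/* Route single-step and breakpoint traps to uprobes or kprobes by privilege mode. */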
static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

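/*
 * ebreak handling: probes first, then SIGTRAP for user mode, KGDB, WARN/CFI
 * reports (skipping the break instruction), and finally a kernel BUG.
 */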
void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		handle_break(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

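/*
 * System call entry: step past the ecall, stash the original a0, discard the
 * vector state, and run the handler with a randomized kernel stack offset.
 */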
asmlinkage __visible __trap_section  __no_stack_protector
void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		regs->epc += 4;
		regs->orig_a0 = regs->a0;
		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		add_random_kstack_offset();

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);

		/*
		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
		 * so the maximum stack offset is 1k bytes (10 bits).
		 *
		 * The actual entropy will be further reduced by the compiler when
		 * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
		 * for RV32I or RV64I.
		 *
		 * The resulting 6 bits of entropy is seen in SP[9:4].
		 */
		choose_random_kstack_offset(get_random_u16());

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			"Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

#ifdef CONFIG_MMU
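/* Page fault trap entry: wrap handle_page_fault() in the generic irqentry code. */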
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

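/* Invoke the root interrupt handler with IRQ accounting and saved irq regs. */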
static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

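/*
 * Interrupt trap entry: switch to the per-CPU IRQ stack when enabled and we
 * are still on the task stack, otherwise handle the interrupt in place.
 */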
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
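/*
 * A BUG() address must lie in kernel space and contain a 32-bit or
 * compressed break instruction.
 */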
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
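/* Per-CPU emergency stack used once the regular kernel stack has overflowed. */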
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

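/*
 * Entered on the overflow stack when the kernel stack is exhausted: report
 * the stack bounds and registers, then panic.
 */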
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif