xref: /linux/arch/riscv/kernel/signal.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
4  *  Chen Liqin <liqin.chen@sunplusct.com>
5  *  Lennox Wu <lennox.wu@sunplusct.com>
6  * Copyright (C) 2012 Regents of the University of California
7  */
8 
9 #include <linux/compat.h>
10 #include <linux/signal.h>
11 #include <linux/uaccess.h>
12 #include <linux/syscalls.h>
13 #include <linux/resume_user_mode.h>
14 #include <linux/linkage.h>
15 #include <linux/entry-common.h>
16 
17 #include <asm/ucontext.h>
18 #include <asm/vdso.h>
19 #include <asm/signal.h>
20 #include <asm/signal32.h>
21 #include <asm/switch_to.h>
22 #include <asm/vector.h>
23 #include <asm/csr.h>
24 #include <asm/cacheflush.h>
25 #include <asm/usercfi.h>
26 
/* Minimum signal-stack size advertised via AT_MINSIGSTKSZ; set in init_rt_signal_env(). */
unsigned long signal_minsigstksz __ro_after_init;

/* Two-instruction sigreturn trampoline (defined elsewhere); copied to the user stack on nommu. */
extern u32 __user_rt_sigreturn[2];
/* Byte size of the Vector signal context (header + state + vregs); computed at boot. */
static size_t riscv_v_sc_size __ro_after_init;
/* Byte size of the Zicfiss (shadow-stack) signal context; computed at boot. */
static size_t riscv_zicfiss_sc_size __ro_after_init;

/* Set to 1 to enable the signal-delivery pr_info() in setup_rt_frame(). */
#define DEBUG_SIG 0
34 
/*
 * Layout of the rt signal frame placed on the user stack: siginfo followed
 * by the ucontext (which embeds the variable-length sigcontext extensions).
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
#ifndef CONFIG_MMU
	/* nommu has no vDSO: the sigreturn trampoline is copied here instead. */
	u32 sigreturn_code[2];
#endif
};
42 
#ifdef CONFIG_FPU
/*
 * Copy the FP state from the user signal frame into current->thread.fstate
 * and load it into the FP registers via fstate_restore().
 * Returns 0 on success or a nonzero uaccess error.
 */
static long restore_fp_state(struct pt_regs *regs,
			     union __riscv_fp_state __user *sc_fpregs)
{
	long err;
	/* Only the d slot of the union is used for save/restore here. */
	struct __riscv_d_ext_state __user *state = &sc_fpregs->d;

	err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
	if (unlikely(err))
		return err;

	fstate_restore(current, regs);
	return 0;
}

/*
 * Flush the live FP registers into current->thread.fstate via fstate_save()
 * and copy them out to the user signal frame.
 * Returns 0 on success or a nonzero uaccess error.
 */
static long save_fp_state(struct pt_regs *regs,
			  union __riscv_fp_state __user *sc_fpregs)
{
	long err;
	struct __riscv_d_ext_state __user *state = &sc_fpregs->d;

	fstate_save(current, regs);
	err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
	return err;
}
#else
/* No FPU configured: the FP save/restore paths trivially succeed. */
#define save_fp_state(task, regs) (0)
#define restore_fp_state(task, regs) (0)
#endif
72 
/*
 * Save the Vector state into the user signal context at @sc_vec.
 * Returns riscv_v_sc_size (bytes written, header included by the caller's
 * accounting) on success, 0 when there is no live Vector state to save,
 * or -EFAULT on a uaccess failure.
 */
static long save_v_state(struct pt_regs *regs, void __user *sc_vec)
{
	struct __sc_riscv_v_state __user *state;
	void __user *datap;
	long err;

	/* Nothing to do without V support or without live vstate in @regs. */
	if (!IS_ENABLED(CONFIG_RISCV_ISA_V) ||
	    !((has_vector() || has_xtheadvector()) &&
	    riscv_v_vstate_query(regs)))
		return 0;

	/* Place state to the user's signal context space */
	state = (struct __sc_riscv_v_state __user *)sc_vec;
	/* Point datap right after the end of __sc_riscv_v_state */
	datap = state + 1;

	/* datap is designed to be 16 byte aligned for better performance */
	WARN_ON(!IS_ALIGNED((unsigned long)datap, 16));

	/* Flush the live vector registers into current->thread.vstate. */
	get_cpu_vector_context();
	riscv_v_vstate_save(&current->thread.vstate, regs);
	put_cpu_vector_context();

	/* Copy everything of vstate but datap. */
	err = __copy_to_user(&state->v_state, &current->thread.vstate,
			     offsetof(struct __riscv_v_ext_state, datap));
	/* Copy the pointer datap itself. */
	err |= __put_user((__force void *)datap, &state->v_state.datap);
	/* Copy the whole vector content to user space datap. */
	err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
	if (unlikely(err))
		return -EFAULT;

	/* Only return the size if everything has done successfully  */
	return riscv_v_sc_size;
}
109 
/*
 * Restore Vector extension context from the user's signal frame. This function
 * assumes a valid extension header. So magic and size checking must be done by
 * the caller.
 *
 * Returns 0 on success or a nonzero uaccess error/remainder count.
 */
static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
{
	long err;
	struct __sc_riscv_v_state __user *state = sc_vec;
	void __user *datap;

	/*
	 * Mark the vstate as clean prior performing the actual copy,
	 * to avoid getting the vstate incorrectly clobbered by the
	 *  discarded vector state.
	 */
	riscv_v_vstate_set_restore(current, regs);

	/* Copy everything of __sc_riscv_v_state except datap. */
	err = __copy_from_user(&current->thread.vstate, &state->v_state,
			       offsetof(struct __riscv_v_ext_state, datap));
	if (unlikely(err))
		return err;

	/* Copy the pointer datap itself. */
	err = __get_user(datap, &state->v_state.datap);
	if (unlikely(err))
		return err;
	/*
	 * Copy the whole vector content from user space datap. Use
	 * copy_from_user to prevent information leak.
	 */
	return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
}
144 
/*
 * Save the shadow-stack (Zicfiss) state into the user signal context at
 * @sc_cfi.  Returns riscv_zicfiss_sc_size on success, 0 when shadow stack
 * is not enabled for current, or -EFAULT on failure.
 */
static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
{
	struct __sc_riscv_cfi_state __user *state = sc_cfi;
	unsigned long ss_ptr = 0;
	long err = 0;

	if (!is_shstk_enabled(current))
		return 0;

	/*
	 * Save a pointer to the shadow stack itself on shadow stack as a form of token.
	 * A token on the shadow stack gives the following properties:
	 * - Safe save and restore for shadow stack switching. Any save of a shadow stack
	 *   must have saved a token on the shadow stack. Similarly any restore of shadow
	 *   stack must check the token before restore. Since writing to the shadow stack with
	 *   address of the shadow stack itself is not easily allowed, a restore without a save
	 *   is quite difficult for an attacker to perform.
	 * - A natural break. A token in shadow stack provides a natural break in shadow stack
	 *   So a single linear range can be bucketed into different shadow stack segments. Any
	 *   sspopchk will detect the condition and fault to kernel as a sw check exception.
	 */
	err |= save_user_shstk(current, &ss_ptr);
	err |= __put_user(ss_ptr, &state->ss_ptr);
	if (unlikely(err))
		return -EFAULT;

	return riscv_zicfiss_sc_size;
}
173 
/*
 * Restore the shadow-stack (Zicfiss) state from the user signal context at
 * @sc_cfi.  The caller has already validated the extension header's magic
 * and size.  Returns 0 on success or a nonzero error.
 */
static long __restore_cfiss_state(struct pt_regs *regs, void __user *sc_cfi)
{
	struct __sc_riscv_cfi_state __user *state = sc_cfi;
	unsigned long ss_ptr = 0;
	long err;

	/*
	 * Restore shadow stack as a form of token stored on the shadow stack itself as a safe
	 * way to restore.
	 * A token on the shadow stack gives the following properties:
	 * - Safe save and restore for shadow stack switching. Any save of shadow stack
	 *   must have saved a token on shadow stack. Similarly any restore of shadow
	 *   stack must check the token before restore. Since writing to a shadow stack with
	 *   the address of shadow stack itself is not easily allowed, a restore without a save
	 *   is quite difficult for an attacker to perform.
	 * - A natural break. A token in the shadow stack provides a natural break in shadow stack
	 *   So a single linear range can be bucketed into different shadow stack segments.
	 *   sspopchk will detect the condition and fault to kernel as a sw check exception.
	 */
	err = __copy_from_user(&ss_ptr, &state->ss_ptr, sizeof(unsigned long));

	if (unlikely(err))
		return err;

	return restore_user_shstk(current, ss_ptr);
}
200 
/*
 * Table entry describing one optional signal-context extension: the magic
 * written into its __riscv_ctx_hdr and the save callback that fills the
 * payload.  Each save() returns payload size on success, 0 when the
 * extension is inactive, or a negative error.
 */
struct arch_ext_priv {
	__u32 magic;
	long (*save)(struct pt_regs *regs, void __user *sc_vec);
};

/* All extensions setup_sigcontext() may append after the base sigcontext. */
static struct arch_ext_priv arch_ext_list[] = {
	{
		.magic = RISCV_V_MAGIC,
		.save = &save_v_state,
	},
	{
		.magic = RISCV_ZICFISS_MAGIC,
		.save = &save_cfiss_state,
	},
};

static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
218 
/*
 * Restore GP, FP and extension state from the user sigcontext @sc into
 * @regs / current.  Walks the variable-length extension list headed at
 * sc_extdesc.hdr, validating each __riscv_ctx_hdr's magic and size until
 * the END_MAGIC terminator.  Returns 0 on success, a uaccess error, or
 * -EINVAL on a malformed frame.
 */
static long restore_sigcontext(struct pt_regs *regs,
	struct sigcontext __user *sc)
{
	void __user *sc_ext_ptr = &sc->sc_extdesc.hdr;
	__u32 rsvd;
	long err;
	/* sc_regs is structured the same as the start of pt_regs */
	err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
	if (unlikely(err))
		return err;

	/* Restore the floating-point state. */
	if (has_fpu()) {
		err = restore_fp_state(regs, &sc->sc_fpregs);
		if (unlikely(err))
			return err;
	}

	/* Check the reserved word before extensions parsing */
	err = __get_user(rsvd, &sc->sc_extdesc.reserved);
	if (unlikely(err))
		return err;
	if (unlikely(rsvd))
		return -EINVAL;

	while (!err) {
		__u32 magic, size;
		struct __riscv_ctx_hdr __user *head = sc_ext_ptr;

		err |= __get_user(magic, &head->magic);
		err |= __get_user(size, &head->size);
		if (unlikely(err))
			return err;

		/* The extension payload starts right after its header. */
		sc_ext_ptr += sizeof(*head);
		switch (magic) {
		case END_MAGIC:
			if (size != END_HDR_SIZE)
				return -EINVAL;

			return 0;
		case RISCV_V_MAGIC:
			/* Reject if V is unavailable/inactive or the size does not match. */
			if (!(has_vector() || has_xtheadvector()) || !riscv_v_vstate_query(regs) ||
			    size != riscv_v_sc_size)
				return -EINVAL;

			err = __restore_v_state(regs, sc_ext_ptr);
			break;
		case RISCV_ZICFISS_MAGIC:
			if (!is_shstk_enabled(current) || size != riscv_zicfiss_sc_size)
				return -EINVAL;

			err = __restore_cfiss_state(regs, sc_ext_ptr);
			break;
		default:
			return -EINVAL;
		}
		/* Advance by the header-declared size to the next entry. */
		sc_ext_ptr = (void __user *)head + size;
	}
	return err;
}
280 
281 static size_t get_rt_frame_size(bool cal_all)
282 {
283 	struct rt_sigframe __user *frame;
284 	size_t frame_size;
285 	size_t total_context_size = 0;
286 
287 	frame_size = sizeof(*frame);
288 
289 	if (has_vector() || has_xtheadvector()) {
290 		if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
291 			total_context_size += riscv_v_sc_size;
292 	}
293 
294 	if (is_shstk_enabled(current))
295 		total_context_size += riscv_zicfiss_sc_size;
296 
297 	/*
298 	 * Preserved a __riscv_ctx_hdr for END signal context header if an
299 	 * extension uses __riscv_extra_ext_header
300 	 */
301 	if (total_context_size)
302 		total_context_size += sizeof(struct __riscv_ctx_hdr);
303 
304 	frame_size += total_context_size;
305 
306 	frame_size = round_up(frame_size, 16);
307 	return frame_size;
308 }
309 
/*
 * rt_sigreturn(2): invoked by the trampoline when a signal handler returns.
 * Re-reads the frame that setup_rt_frame() pushed at the current sp,
 * restores the sigmask, sigcontext and altstack, and resumes the
 * interrupted context.  A bad frame forces SIGSEGV.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct task_struct *task;
	sigset_t set;
	/* Only the extensions live for this task were saved; size accordingly. */
	size_t frame_size = get_rt_frame_size(false);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, frame_size))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Not a syscall return path any more: suppress restart handling. */
	regs->cause = -1UL;

	return regs->a0;

badframe:
	task = current;
	if (show_unhandled_signals) {
		pr_info_ratelimited(
			"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
			task->comm, task_pid_nr(task), __func__,
			frame, (void *)regs->epc, (void *)regs->sp);
	}
	force_sig(SIGSEGV);
	return 0;
}
352 
/*
 * Populate the sigcontext inside @frame from @regs: GP registers, FP state,
 * then each active extension from arch_ext_list (each preceded by its
 * __riscv_ctx_hdr), terminated by an END header.  Returns 0 on success or
 * a nonzero uaccess error.
 */
static long setup_sigcontext(struct rt_sigframe __user *frame,
	struct pt_regs *regs)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	struct __riscv_ctx_hdr __user *sc_ext_ptr = &sc->sc_extdesc.hdr;
	struct arch_ext_priv *arch_ext;
	long err, i, ext_size;

	/* sc_regs is structured the same as the start of pt_regs */
	err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
	/* Save the floating-point state. */
	if (has_fpu())
		err |= save_fp_state(regs, &sc->sc_fpregs);
	/* Save the vector state. */
	for (i = 0; i < nr_arch_exts; i++) {
		arch_ext = &arch_ext_list[i];
		if (!arch_ext->save)
			continue;

		/* The payload goes right after this extension's header. */
		ext_size = arch_ext->save(regs, sc_ext_ptr + 1);
		if (ext_size <= 0) {
			/* 0 = inactive (skip header); negative = record the error. */
			err |= ext_size;
		} else {
			err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic);
			err |= __put_user(ext_size, &sc_ext_ptr->size);
			sc_ext_ptr = (void __user *)sc_ext_ptr + ext_size;
		}
	}
	/* Write zero to fp-reserved space and check it on restore_sigcontext */
	err |= __put_user(0, &sc->sc_extdesc.reserved);
	/* And put END __riscv_ctx_hdr at the end. */
	err |= __put_user(END_MAGIC, &sc_ext_ptr->magic);
	err |= __put_user(END_HDR_SIZE, &sc_ext_ptr->size);

	return err;
}
389 
390 static inline void __user *get_sigframe(struct ksignal *ksig,
391 	struct pt_regs *regs, size_t framesize)
392 {
393 	unsigned long sp;
394 	/* Default to using normal stack */
395 	sp = regs->sp;
396 
397 	/*
398 	 * If we are on the alternate signal stack and would overflow it, don't.
399 	 * Return an always-bogus address instead so we will die with SIGSEGV.
400 	 */
401 	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
402 		return (void __user __force *)(-1UL);
403 
404 	/* This is the X/Open sanctioned signal stack switching. */
405 	sp = sigsp(sp, ksig) - framesize;
406 
407 	/* Align the stack frame. */
408 	sp &= ~0xfUL;
409 
410 	return (void __user *)sp;
411 }
412 
/*
 * Build the rt signal frame on the user stack (siginfo + ucontext +
 * extension contexts), install the sigreturn trampoline as the return
 * address, and redirect @regs to the handler.  Returns 0 on success or
 * -EFAULT if the frame cannot be written.
 */
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
	struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	long err = 0;
	unsigned long __maybe_unused addr;
	size_t frame_size = get_rt_frame_size(false);

	frame = get_sigframe(ksig, regs, frame_size);
	if (!access_ok(frame, frame_size))
		return -EFAULT;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigcontext(frame, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/* Set up to return from userspace. */
#ifdef CONFIG_MMU
	regs->ra = (unsigned long)VDSO_SYMBOL(
		current->mm->context.vdso, rt_sigreturn);

	/* if bcfi is enabled x1 (ra) and x5 (t0) must match. not sure if we need this? */
	if (is_shstk_enabled(current))
		regs->t0 = regs->ra;

#else
	/*
	 * For the nommu case we don't have a VDSO.  Instead we push two
	 * instructions to call the rt_sigreturn syscall onto the user stack.
	 */
	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
			 sizeof(frame->sigreturn_code)))
		return -EFAULT;

	addr = (unsigned long)&frame->sigreturn_code;
	/* Make sure the two instructions are pushed to icache. */
	flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));

	regs->ra = addr;
#endif /* CONFIG_MMU */

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
	 * since some things rely on this (e.g. glibc's debug/segfault.c).
	 */
	regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
	regs->sp = (unsigned long)frame;
	regs->a0 = ksig->sig;                     /* a0: signal number */
	regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
	regs->a2 = (unsigned long)(&frame->uc);   /* a2: ucontext pointer */

#if DEBUG_SIG
	pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
		current->comm, task_pid_nr(current), ksig->sig,
		(void *)regs->epc, (void *)regs->ra, frame);
#endif

	return 0;
}
482 
483 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
484 {
485 	sigset_t *oldset = sigmask_to_save();
486 	int ret;
487 
488 	rseq_signal_deliver(ksig, regs);
489 
490 	/* Set up the stack frame */
491 	if (is_compat_task())
492 		ret = compat_setup_rt_frame(ksig, oldset, regs);
493 	else
494 		ret = setup_rt_frame(ksig, oldset, regs);
495 
496 	signal_setup_done(ret, ksig, 0);
497 }
498 
/*
 * Entry point for signal work on return to user mode: handle syscall
 * restart bookkeeping, deliver one pending signal if there is one, and
 * otherwise restore the saved sigmask.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = (regs->cause == EXC_SYSCALL);

	/* If we were from a system call, check for system call restarting */
	if (syscall) {
		continue_addr = regs->epc;
		/* epc was advanced past the 4-byte ecall; step back to re-issue it. */
		restart_addr = continue_addr - 4;
		retval = regs->a0;

		/* Avoid additional syscall restarting via ret_from_exception */
		regs->cause = -1UL;

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->a0 = regs->orig_a0;
			regs->epc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->epc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			regs->a0 = -EINTR;
			regs->epc = continue_addr;
		}

		/* Actually deliver the signal */
		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
		regs->a7 = __NR_restart_syscall;

	/*
	 * If there is no signal to deliver, we just put the saved
	 * sigmask back.
	 */
	restore_saved_sigmask();
}
567 
void init_rt_signal_env(void);
/* Compute per-extension signal-context sizes and the minimum sigstack size at boot. */
void __init init_rt_signal_env(void)
{
	/* Each extension context is its header plus its state payload. */
	riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) +
			  sizeof(struct __sc_riscv_v_state) + riscv_v_vsize;

	riscv_zicfiss_sc_size = sizeof(struct __riscv_ctx_hdr) +
			  sizeof(struct __sc_riscv_cfi_state);
	/*
	 * Determine the stack space required for guaranteed signal delivery.
	 * The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry
	 * in the auxiliary array at process startup.
	 */
	signal_minsigstksz = get_rt_frame_size(true);
}
583 
#ifdef CONFIG_DYNAMIC_SIGFRAME
/* An alternate signal stack must be large enough to hold one signal frame. */
bool sigaltstack_size_valid(size_t ss_size)
{
	return ss_size > get_rt_frame_size(false);
}
#endif /* CONFIG_DYNAMIC_SIGFRAME */
590