// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

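/*
 * Initialise tracking of the signal frame layout.  user->size is the
 * running total of bytes allocated in the frame (it starts at the
 * offset of __reserved[], where the records live); user->limit is the
 * size that allocations may not exceed.  Space for the terminator
 * record and a possible extra_context record is reserved up front by
 * shrinking the limit.
 */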
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

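/*
 * Allocate space for a record of <size> bytes, rounded up to a 16-byte
 * multiple.  If the record does not fit in the space remaining in
 * __reserved[] and <extend> is true, an extra_context record is first
 * allocated to point at space beyond the base frame, and the limit is
 * raised towards SIGFRAME_MAXSZ so that the allocation can continue in
 * that extra space.
 */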
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

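/* Resolve a layout offset to a user pointer within the signal frame */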
static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

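/*
 * Locations (and sizes) of the optional records found by
 * parse_user_sigframe(); absent records are left NULL.
 */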
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		write_sysreg_s(por_el0, SYS_POR_EL0);

	return err;
}

#ifdef CONFIG_ARM64_SVE

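/*
 * The SVE record also carries the register state while the task is in
 * streaming mode: in that case it reports the SME vector length and
 * has SVE_SIG_FLAG_SM set in its flags.
 */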
static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

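/*
 * Walk the {magic, size} records in the __reserved[] area of the
 * sigcontext (and, if an EXTRA_MAGIC record is found, the extra data
 * area it points to), recording the location and size of each known
 * record in *user.  Unknown magic values, duplicate records, and
 * misaligned or out-of-bounds records all cause the frame to be
 * rejected.
 */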
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

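/*
 * Restore the general purpose registers, signal mask and all optional
 * context records from the user signal frame.  Any failure is reported
 * to the caller, which raises SIGSEGV rather than returning to a
 * partially restored context.
 */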
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 *	this task; otherwise, generates a layout for the current state
 *	of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

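/*
 * Populate the signal frame laid out by setup_sigframe_layout(): the
 * frame record used for unwinding, the general purpose registers, the
 * signal mask, and one record for each offset that was allocated,
 * finishing with the terminator record.
 */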
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0 && user->poe_offset) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

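/*
 * Lay out the signal frame and carve space for it (plus a frame
 * record) out of the stack selected by sigsp(), checking that the
 * resulting region is writable.
 */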
static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

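/*
 * Point the task at the signal handler: x0 carries the signal number,
 * sp the new frame, and x30 the return trampoline (either the vDSO
 * sigtramp or a user-supplied SA_RESTORER).  PSTATE and the SME/POR
 * state are also reset to the state handlers expect to be entered
 * with.
 */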
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

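/*
 * Build an rt signal frame on the stack and arrange for the handler
 * to be entered.  For SA_SIGINFO handlers, x1 and x2 point at the
 * siginfo and ucontext within the frame.
 */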
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL	== 11);
static_assert(NSIGFPE	== 15);
static_assert(NSIGSEGV	== 10);
static_assert(NSIGBUS	== 5);
static_assert(NSIGTRAP	== 6);
static_assert(NSIGCHLD	== 6);
static_assert(NSIGSYS	== 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
static_assert(offsetof(siginfo_t, si_code)	== 0x08);
static_assert(offsetof(siginfo_t, si_pid)	== 0x10);
static_assert(offsetof(siginfo_t, si_uid)	== 0x14);
static_assert(offsetof(siginfo_t, si_tid)	== 0x10);
static_assert(offsetof(siginfo_t, si_overrun)	== 0x14);
static_assert(offsetof(siginfo_t, si_status)	== 0x18);
static_assert(offsetof(siginfo_t, si_utime)	== 0x20);
static_assert(offsetof(siginfo_t, si_stime)	== 0x28);
static_assert(offsetof(siginfo_t, si_value)	== 0x18);
static_assert(offsetof(siginfo_t, si_int)	== 0x18);
static_assert(offsetof(siginfo_t, si_ptr)	== 0x18);
static_assert(offsetof(siginfo_t, si_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x18);
static_assert(offsetof(siginfo_t, si_lower)	== 0x20);
static_assert(offsetof(siginfo_t, si_upper)	== 0x28);
static_assert(offsetof(siginfo_t, si_pkey)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_data)	== 0x18);
static_assert(offsetof(siginfo_t, si_perf_type)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band)	== 0x10);
static_assert(offsetof(siginfo_t, si_fd)	== 0x18);
static_assert(offsetof(siginfo_t, si_call_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_syscall)	== 0x18);
static_assert(offsetof(siginfo_t, si_arch)	== 0x1c);
1466