// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	/* Reserve space for the extension record and the terminator: */
	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}
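
/*
 * Usage sketch (illustration only, not part of the kernel ABI): each
 * optional record is placed in two steps -- sigframe_alloc() reserves a
 * slot during layout, and apply_user_offset() (below) locates it again
 * when the frame is filled in.  A hypothetical "foo" record would look
 * like:
 *
 *	err = sigframe_alloc(user, &user->foo_offset,
 *			     sizeof(struct foo_context));	// layout pass
 *	...
 *	struct foo_context __user *foo_ctx =
 *		apply_user_offset(user, user->foo_offset);	// fill pass
 *
 * "foo_offset" and "struct foo_context" are invented for the example;
 * the real records are the ones listed in rt_sigframe_user_layout.
 */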

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
	struct tpidr2_context __user *tpidr2;
	struct za_context __user *za;
	struct zt_context __user *zt;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		if (!system_supports_sve())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (sve.vl != vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	/* Magic and size were validated when deciding to call this function */
	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		current->thread.tpidr2_el0 = tpidr2_el0;

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct za_context za;

	if (__copy_from_user(&za, user->za, sizeof(za)))
		return -EFAULT;

	if (za.vl != task_get_sme_vl(current))
		return -EINVAL;

	if (za.head.size <= sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(za.vl);

	if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	struct zt_context zt;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (__copy_from_user(&zt, user->zt, sizeof(zt)))
		return -EFAULT;

	if (zt.nregs != 1)
		return -EINVAL;

	if (zt.head.size != ZT_SIG_CONTEXT_SIZE(zt.nregs))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			if (size != sizeof(*user->tpidr2))
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			if (size < sizeof(*user->za))
				goto invalid;

			user->za = (struct za_context __user *)head;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			if (size < sizeof(*user->zt))
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
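
/*
 * For reference, the record chain that parse_user_sigframe() walks looks
 * roughly like this (illustration only; which optional records appear
 * depends on CPU features and task state):
 *
 *	uc.uc_mcontext.__reserved[]:
 *		fpsimd_context			FPSIMD_MAGIC
 *		[esr_context]			ESR_MAGIC
 *		[sve_context]			SVE_MAGIC
 *		[tpidr2/za/zt records]		TPIDR2/ZA/ZT_MAGIC
 *		[extra_context]			EXTRA_MAGIC; datap points just
 *						past the terminator below
 *		{ magic = 0, size = 0 }		terminator
 *
 * Every record is 16-byte aligned and self-describing via its
 * _aarch64_ctx header; unknown magic values are rejected rather than
 * skipped.
 */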

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(user.fpsimd);
	}

	if (err == 0 && system_supports_sme() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal frame on a 128-bit boundary, 'sp'
	 * should be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 *	this task; otherwise, generates a layout for the current state
 *	of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_sme() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
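
/*
 * Rough picture of the resulting stack layout, from high to low
 * addresses (illustration only):
 *
 *	sp_top:		original sp or sigaltstack top, per sigsp()
 *	next_frame:	frame_record { fp, lr } linking the handler into
 *			the existing call chain for unwinders
 *	  ...		extra_context data, if the records overflowed
 *			__reserved[]
 *	sigframe (sp):	struct rt_sigframe (siginfo + ucontext, with the
 *			record chain in __reserved[])
 *
 * Everything is 16-byte aligned, and access_ok() above validates the
 * whole [sigframe, sp_top) range in one go.
 */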

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}
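
/*
 * Userspace-side illustration (an assumption about the toolchain, not
 * something this file enforces): since setup_return() delivers with
 * PSTATE.BTYPE set to BTYPE_C, a handler located in a PROT_BTI guarded
 * page must begin with a BTI landing pad, e.g.:
 *
 *	// Compiled with -mbranch-protection=standard, the compiler emits
 *	// a "bti c" at the function entry automatically:
 *	void handler(int sig, siginfo_t *info, void *uc)
 *	{
 *		...
 *	}
 *
 * Handlers in pages without PROT_BTI are unaffected.
 */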

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL, even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}
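
/*
 * Summary of the restart handling above, assuming the debugger did not
 * move the PC away from restart_addr:
 *
 *	-ERESTARTSYS		restarted only if the handler has SA_RESTART,
 *				otherwise the handler sees -EINTR
 *	-ERESTARTNOINTR		always restarted
 *	-ERESTARTNOHAND		restarted only when no handler runs
 *	-ERESTART_RESTARTBLOCK	restarted via __NR_restart_syscall when no
 *				handler runs
 */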

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
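
/*
 * Userspace consumption sketch (illustration; getauxval() is a libc
 * interface, not defined here):
 *
 *	#include <sys/auxv.h>
 *
 *	size_t min_ss = getauxval(AT_MINSIGSTKSZ);
 *	// A sigaltstack smaller than this may be unable to hold the
 *	// signal frame laid out above.
 */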

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL	== 11);
static_assert(NSIGFPE	== 15);
static_assert(NSIGSEGV	== 9);
static_assert(NSIGBUS	== 5);
static_assert(NSIGTRAP	== 6);
static_assert(NSIGCHLD	== 6);
static_assert(NSIGSYS	== 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
static_assert(offsetof(siginfo_t, si_code)	== 0x08);
static_assert(offsetof(siginfo_t, si_pid)	== 0x10);
static_assert(offsetof(siginfo_t, si_uid)	== 0x14);
static_assert(offsetof(siginfo_t, si_tid)	== 0x10);
static_assert(offsetof(siginfo_t, si_overrun)	== 0x14);
static_assert(offsetof(siginfo_t, si_status)	== 0x18);
static_assert(offsetof(siginfo_t, si_utime)	== 0x20);
static_assert(offsetof(siginfo_t, si_stime)	== 0x28);
static_assert(offsetof(siginfo_t, si_value)	== 0x18);
static_assert(offsetof(siginfo_t, si_int)	== 0x18);
static_assert(offsetof(siginfo_t, si_ptr)	== 0x18);
static_assert(offsetof(siginfo_t, si_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x18);
static_assert(offsetof(siginfo_t, si_lower)	== 0x20);
static_assert(offsetof(siginfo_t, si_upper)	== 0x28);
static_assert(offsetof(siginfo_t, si_pkey)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_data)	== 0x18);
static_assert(offsetof(siginfo_t, si_perf_type)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band)	== 0x10);
static_assert(offsetof(siginfo_t, si_fd)	== 0x18);
static_assert(offsetof(siginfo_t, si_call_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_syscall)	== 0x18);
static_assert(offsetof(siginfo_t, si_arch)	== 0x1c);
1371