// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

#include "signal.h"

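/*
 * Sigreturn trampoline instructions (ARM/Thumb, plain and rt variants),
 * defined in sigreturn_codes.S.  setup_return() below either copies them
 * onto the user stack or points the handler's return address at the copy
 * held in the per-process signal page.
 */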
extern const unsigned long sigreturn_codes[17];

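/*
 * Randomized offset of the sigreturn trampolines within the signal page,
 * chosen once in get_signal_page() below and used by setup_return().
 */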
static unsigned long signal_return_offset;

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		kframe->magic = IWMMXT_MAGIC;
		kframe->size = IWMMXT_STORAGE_SIZE;
		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	} else {
		/*
		 * For bug-compatibility with older kernels, some space
		 * has to be reserved for iWMMXt even if it's not used.
		 * Set the magic and size appropriately so that properly
		 * written userspace can skip it reliably:
		 */
		*kframe = (struct iwmmxt_sigframe) {
			.magic = DUMMY_MAGIC,
			.size  = IWMMXT_STORAGE_SIZE,
		};
	}

	err = __copy_to_user(frame, kframe, sizeof(*kframe));

	return err;
}

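/*
 * Restore the iWMMXt block found at *auxp in uc_regspace.  If a dummy or
 * iWMMXt block is present, the pointer is advanced past it so the caller
 * can look for the VFP block (if any) after it.
 */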
static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence.  If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	struct vfp_sigframe kframe;
	int err = 0;

	memset(&kframe, 0, sizeof(kframe));
	kframe.magic = VFP_MAGIC;
	kframe.size = VFP_STORAGE_SIZE;

	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
	if (err)
		return err;

	return __copy_to_user(frame, &kframe, sizeof(kframe));
}

static int restore_vfp_context(char __user **auxp)
{
	struct vfp_sigframe frame;
	int err;

	err = __copy_from_user(&frame, *auxp, sizeof(frame));
	if (err)
		return err;

	if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
		return -EINVAL;

	*auxp += sizeof(frame);
	return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	err |= !valid_user_regs(regs);

	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}

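/*
 * sys_sigreturn() and sys_rt_sigreturn() are reached through the trampolines
 * installed by setup_return(); they unwind the frame that setup_frame() or
 * setup_rt_frame() pushed below the interrupted user context.
 */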
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal frame on a 64-bit boundary,
	 * 'sp' should be 64-bit aligned here.  If it's not, the
	 * user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV);
	return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal frame on a 64-bit boundary,
	 * 'sp' should be 64-bit aligned here.  If it's not, the
	 * user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV);
	return 0;
}

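/*
 * Save the interrupted register state, the blocked signal mask and any
 * coprocessor state (iWMMXt, VFP) into the user-mode signal frame.  This
 * is the mirror of restore_sigframe() above.
 */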
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	struct sigcontext context;
	int err = 0;

	context = (struct sigcontext) {
		.arm_r0        = regs->ARM_r0,
		.arm_r1        = regs->ARM_r1,
		.arm_r2        = regs->ARM_r2,
		.arm_r3        = regs->ARM_r3,
		.arm_r4        = regs->ARM_r4,
		.arm_r5        = regs->ARM_r5,
		.arm_r6        = regs->ARM_r6,
		.arm_r7        = regs->ARM_r7,
		.arm_r8        = regs->ARM_r8,
		.arm_r9        = regs->ARM_r9,
		.arm_r10       = regs->ARM_r10,
		.arm_fp        = regs->ARM_fp,
		.arm_ip        = regs->ARM_ip,
		.arm_sp        = regs->ARM_sp,
		.arm_lr        = regs->ARM_lr,
		.arm_pc        = regs->ARM_pc,
		.arm_cpsr      = regs->ARM_cpsr,

		.trap_no       = current->thread.trap_no,
		.error_code    = current->thread.error_code,
		.fault_address = current->thread.address,
		.oldmask       = set->sig[0],
	};

	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
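	/*
	 * Terminate the list of magic/size coprocessor blocks in uc_regspace
	 * with a zero magic so userspace can tell where the extensions end.
	 */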
	err |= __put_user(0, &aux->end_magic);

	return err;
}

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
	unsigned long sp = sigsp(regs->ARM_sp, ksig);
	void __user *frame;

	/*
	 * ATPCS B01 mandates 8-byte alignment
	 */
	frame = (void __user *)((sp - framesize) & ~7);

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(frame, framesize))
		frame = NULL;

	return frame;
}

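/*
 * Arrange for the thread to run the signal handler: select the sigreturn
 * return mechanism (sa_restorer, trampoline on the stack, or the copy in
 * the signal page), then point PC at the handler, LR at the return code
 * and SP at the new frame.  Returns non-zero if writing to user memory
 * failed.
 */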
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long handler_fdpic_GOT = 0;
	unsigned long retcode;
	unsigned int idx, thumb = 0;
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
		     (current->personality & FDPIC_FUNCPTRS);

	if (fdpic) {
		unsigned long __user *fdpic_func_desc =
					(unsigned long __user *)handler;
		if (__get_user(handler, &fdpic_func_desc[0]) ||
		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
			return 1;
	}

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to always do this to simplify the code; this field is
		 * marked UNK/SBZP for older architectures.
		 */
		cpsr &= ~PSR_IT_MASK;

		if (thumb)
			cpsr |= PSR_T_BIT;
		else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
		if (fdpic) {
			/*
			 * We need code to load the function descriptor.
			 * That code follows the standard sigreturn code
			 * (6 words), and is made of 3 + 2 words for each
			 * variant. The 4th copied word is the actual FD
			 * address that the assembly code expects.
			 */
			idx = 6 + thumb * 3;
			if (ksig->ka.sa.sa_flags & SA_SIGINFO)
				idx += 5;
			if (__put_user(sigreturn_codes[idx],   rc  ) ||
			    __put_user(sigreturn_codes[idx+1], rc+1) ||
			    __put_user(sigreturn_codes[idx+2], rc+2) ||
			    __put_user(retcode,                rc+3))
				return 1;
			goto rc_finish;
		}
	} else {
		idx = thumb << 1;
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

rc_finish:
#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 3));

			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = ksig->sig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	if (fdpic)
		regs->ARM_r9 = handler_fdpic_GOT;
	regs->ARM_cpsr = cpsr;

	return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/*
	 * Set uc.uc_flags to a value which sc.trap_no would never have.
	 */
	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);

	err |= setup_sigframe(frame, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->retcode, frame);

	return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	err |= __put_user(0, &frame->sig.uc.uc_flags);
	err |= __put_user(NULL, &frame->sig.uc.uc_link);

	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
	err |= setup_sigframe(&frame->sig, regs, set);
	if (err == 0)
		err = setup_return(regs, ksig, frame->sig.retcode, frame);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
	}

	return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;

	/*
	 * Perform fixup for the pre-signal frame.
	 */
	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(ksig, oldset, regs);
	else
		ret = setup_frame(ksig, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init, not even with SIGKILL, not
 * even by mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
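		/*
		 * After this switch, restart is +1 for a syscall that should
		 * be restarted by re-executing the original instruction, and
		 * -1 (via the -2 below) for -ERESTART_RESTARTBLOCK, which has
		 * to be restarted through sys_restart_syscall().  The syscall
		 * entry assembly distinguishes the two by the sign of
		 * do_work_pending()'s return value.
		 */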
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			fallthrough;
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}

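/*
 * Entered from the exception/syscall return paths with IRQs disabled.
 * Returns 0 to continue the return to user mode; a non-zero value asks the
 * assembly caller to restart the interrupted system call instead (see the
 * restart encoding in do_signal() above).
 */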
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				resume_user_mode_work(regs);
			}
		}
		local_irq_disable();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}

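/*
 * Allocate and populate the signal page.  The caller installs it in the
 * process address space as mm->context.sigpage, which setup_return() uses
 * on MMU kernels.
 */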
struct page *get_signal_page(void)
{
	unsigned long ptr;
	unsigned offset;
	struct page *page;
	void *addr;

	page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return NULL;

	addr = page_address(page);

	/* Poison the entire page */
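	/*
	 * 0xe7fddef1 is an architecturally undefined instruction, so any
	 * stray jump into the unused part of the page faults immediately.
	 */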
	memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
		 PAGE_SIZE / sizeof(u32));

	/* Give the signal return code some randomness */
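	/*
	 * The mask keeps the offset word-aligned and leaves room for the
	 * trampolines before the end of the page.
	 */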
	offset = 0x200 + (get_random_u16() & 0x7fc);
	signal_return_offset = offset;

	/* Copy signal return handlers into the page */
	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

	/* Flush out all instructions in this page */
	ptr = (unsigned long)addr;
	flush_icache_range(ptr, ptr + PAGE_SIZE);

	return page;
}

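/*
 * With CONFIG_DEBUG_RSEQ, the syscall path calls this to verify that no
 * system call was issued from within an rseq critical section.
 */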
#ifdef CONFIG_DEBUG_RSEQ
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
	rseq_syscall(regs);
}
#endif

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL	== 11);
static_assert(NSIGFPE	== 15);
static_assert(NSIGSEGV	== 9);
static_assert(NSIGBUS	== 5);
static_assert(NSIGTRAP	== 6);
static_assert(NSIGCHLD	== 6);
static_assert(NSIGSYS	== 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 4);
static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
static_assert(offsetof(siginfo_t, si_code)	== 0x08);
static_assert(offsetof(siginfo_t, si_pid)	== 0x0c);
static_assert(offsetof(siginfo_t, si_uid)	== 0x10);
static_assert(offsetof(siginfo_t, si_tid)	== 0x0c);
static_assert(offsetof(siginfo_t, si_overrun)	== 0x10);
static_assert(offsetof(siginfo_t, si_status)	== 0x14);
static_assert(offsetof(siginfo_t, si_utime)	== 0x18);
static_assert(offsetof(siginfo_t, si_stime)	== 0x1c);
static_assert(offsetof(siginfo_t, si_value)	== 0x14);
static_assert(offsetof(siginfo_t, si_int)	== 0x14);
static_assert(offsetof(siginfo_t, si_ptr)	== 0x14);
static_assert(offsetof(siginfo_t, si_addr)	== 0x0c);
static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x10);
static_assert(offsetof(siginfo_t, si_lower)	== 0x14);
static_assert(offsetof(siginfo_t, si_upper)	== 0x18);
static_assert(offsetof(siginfo_t, si_pkey)	== 0x14);
static_assert(offsetof(siginfo_t, si_perf_data)	== 0x10);
static_assert(offsetof(siginfo_t, si_perf_type)	== 0x14);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18);
static_assert(offsetof(siginfo_t, si_band)	== 0x0c);
static_assert(offsetof(siginfo_t, si_fd)	== 0x10);
static_assert(offsetof(siginfo_t, si_call_addr)	== 0x0c);
static_assert(offsetof(siginfo_t, si_syscall)	== 0x10);
static_assert(offsetof(siginfo_t, si_arch)	== 0x14);
717