// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after this function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}
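/*
 * For orientation, assuming the usual POR_EL0 encoding (four permission
 * bits per pkey, POR_BITS_PER_PKEY == 4, POE_RXW == 0x7) and eight pkeys,
 * save_reset_user_access_state() computes por_enable_all == 0x77777777:
 * read/write/execute permitted for every pkey, so uaccess while building
 * the signal frame cannot fault on an overlay permission. POR_EL0_INIT
 * then re-establishes the well-defined default state for entering the
 * handler.
 */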
/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after this function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
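/*
 * Sketch of the resulting layout when __sigframe_alloc() overflows the
 * standard __reserved[] area (record sizes illustrative, not ABI):
 *
 *	uc.uc_mcontext.__reserved[]:	[fpsimd] [extra_context] [terminator]
 *	extra data (same stack block):	[sve/za/... records] [terminator]
 *
 * extra_context.datap points just past the in-frame terminator, so the
 * records stay contiguous on the stack and a parser can simply follow
 * datap without any separate allocation.
 */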
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
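/*
 * For reference, every record in __reserved[] begins with the uapi header
 * from <asm/sigcontext.h>:
 *
 *	struct _aarch64_ctx {
 *		__u32 magic;
 *		__u32 size;
 *	};
 *
 * The preserve_*()/restore_*() helpers below all follow the same pattern:
 * write or validate the magic/size header, then copy the payload.
 */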
static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}
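/*
 * Sizing note for the SVE records below: vl is the vector length in
 * bytes and vq = sve_vq_from_vl(vl) counts 128-bit quadwords, i.e.
 * vq == vl / 16. The register payload scales with vq: for example, a
 * 256-bit vector length (vl == 32, vq == 2) carries 32 Z registers of
 * 32 bytes each plus the predicate and FFR state, while vq == 0 encodes
 * a header-only record with no payload.
 */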
#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
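/*
 * Note on streaming mode: a frame captured while SVCR.SM is set carries
 * SVE_SIG_FLAG_SM in the sve_context flags and records the SME
 * (streaming) vector length rather than the regular SVE one, which is
 * why preserve_sve_context() and restore_sve_fpsimd_context() above both
 * select the VL based on that flag.
 */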
#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}
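/*
 * ZA sizing, for orientation: ZA is a vl x vl byte matrix, and
 * ZA_SIG_REGS_SIZE(vq) evaluates to (vq * 16) * (vq * 16). With a
 * streaming VL of 64 bytes (SVL = 512 bits, vq == 4) the payload is
 * therefore 4096 bytes; vq == 0 again encodes a header-only record,
 * meaning ZA is not live.
 */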
static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
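/*
 * The parser below walks the chain of records in __reserved[]. A typical
 * frame on a simple FPSIMD-only system might look like (sizes
 * illustrative):
 *
 *	{ FPSIMD_MAGIC, 528 }	fpsimd_context, always present
 *	{ ESR_MAGIC,     16 }	esr_context, only after a fault
 *	{ 0,              0 }	terminator
 *
 * Each record's size field carries the parser to the next 16-byte
 * aligned header; an EXTRA_MAGIC record redirects parsing to the
 * overflow area described by extra_context.
 */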
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}
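/*
 * Userspace normally reaches this syscall via the vDSO sigreturn
 * trampoline that setup_return() below installs in x30: when the handler
 * returns, the trampoline invokes rt_sigreturn with sp still pointing at
 * the rt_sigframe laid out by the kernel.
 */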
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
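/*
 * Worked example of the resulting layout on a non-SVE system with a
 * pending fault code: fpsimd_context (528 bytes) at the start of
 * __reserved[], esr_context (16 bytes) after it, then the terminator;
 * comfortably within the standard 4096-byte __reserved[] area, so no
 * extra_context record is needed.
 */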
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0 && user->poe_offset) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
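/*
 * Stack picture after get_sigframe(), addresses decreasing downwards:
 *
 *	sp_top ->	previous stack contents / sigaltstack top
 *			struct frame_record	<- regs->regs[29] (x29)
 *			struct rt_sigframe	<- regs->sp in the handler
 *
 * The frame record sits above the sigframe and holds the interrupted
 * context's fp/lr (written in setup_sigframe()), so unwinding from the
 * handler steps straight back to the interrupted code.
 */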
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
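/*
 * Restart handling below, by example: a syscall returning -ERESTARTSYS
 * first has pc wound back to the svc instruction and x0 reset to
 * orig_x0. If the delivered signal's handler lacks SA_RESTART, that
 * rewind is undone and the syscall returns -EINTR instead;
 * -ERESTART_RESTARTBLOCK additionally routes through restart_syscall()
 * when no handler runs.
 */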
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we came from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
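/*
 * Userspace consumes signal_minsigstksz via the auxiliary vector; a
 * sketch of sizing an alternate signal stack against it (assuming
 * <sys/auxv.h> and a separately allocated ss_sp):
 *
 *	size_t minsz = getauxval(AT_MINSIGSTKSZ);
 *	stack_t ss = {
 *		.ss_size = minsz > SIGSTKSZ ? minsz : SIGSTKSZ,
 *	};
 *
 * so that even the largest frame laid out above still fits.
 */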
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);