// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/irq-entry-common.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 *
 * The struct should be zero-initialised. Its members should only be accessed
 * via the accessors below. __valid_fields tracks which of the fields are valid
 * (have been set to some value).
 */
struct user_access_state {
	unsigned int __valid_fields;
	u64 __por_el0;
};

#define UA_STATE_HAS_POR_EL0	BIT(0)

static void set_ua_state_por_el0(struct user_access_state *ua_state,
				 u64 por_el0)
{
	ua_state->__por_el0 = por_el0;
	ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0;
}

static int get_ua_state_por_el0(const struct user_access_state *ua_state,
				u64 *por_el0)
{
	if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) {
		*por_el0 = ua_state->__por_el0;
		return 0;
	}

	return -ENOENT;
}
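/*
 * Currently POR_EL0 is the only field tracked here: a restrictive
 * permission overlay could otherwise make the kernel's uaccess to the
 * signal frame fault. The helpers below bracket frame setup and
 * sigreturn so that uaccess always runs with all overlay permissions
 * enabled.
 */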
/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);

		set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0));
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/*
		 * No ISB required as we can tolerate spurious Overlay faults -
		 * the fault handler will check again based on the new value
		 * of POR_EL0.
		 */
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	u64 por_el0;

	if (get_ua_state_por_el0(ua_state, &por_el0) == 0)
		write_sysreg_s(por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
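/*
 * Note: the first allocation that overflows __reserved[] triggers
 * allocation of an extra_context record inside __reserved[] itself;
 * the record that triggered the expansion (and everything after it)
 * then lands in the extra data area, up to SIGFRAME_MAXSZ.
 */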
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	fpsimd_sync_from_effective_state(current);

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int read_fpsimd_context(struct user_fpsimd_state *fpsimd,
			       struct user_ctxs *user)
{
	int err;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd->vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd->vregs));
	__get_user_error(fpsimd->fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd->fpcr, &(user->fpsimd->fpcr), err);

	return err ? -EFAULT : 0;
}
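/*
 * read_fpsimd_context() is shared: restore_fpsimd_context() consumes
 * it directly, and restore_sve_fpsimd_context() uses it to merge the
 * FPSIMD record on top of restored SVE state.
 */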
static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	clear_thread_flag(TIF_SVE);
	current->thread.svcr &= ~SVCR_SM_MASK;
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	fpsimd_update_current_state(&fpsimd);
	return 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		current->thread.uw.fpmr = fpmr;

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err;
	u64 por_el0;

	err = get_ua_state_por_el0(ua_state, &por_el0);
	if (WARN_ON_ONCE(err))
		return err;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		set_ua_state_por_el0(ua_state, por_el0);

	return err;
}
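/*
 * The SVE record doubles as the streaming mode (SME) record: when
 * SVE_SIG_FLAG_SM is set, the vector length and register payload
 * describe the streaming mode state rather than normal SVE state.
 */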
#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;
	bool sm;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	sm = flags & SVE_SIG_FLAG_SM;
	if (sm) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	/*
	 * Non-streaming SVE state may be preserved without an SVE payload, in
	 * which case the SVE context only has a header with VL==0, and all
	 * state can be restored from the FPSIMD context.
	 *
	 * Streaming SVE state is always preserved with an SVE payload. For
	 * consistency and robustness, reject restoring streaming SVE state
	 * without an SVE payload.
	 */
	if (!sm && user->sve_size == sizeof(*user->sve))
		return restore_fpsimd_context(user);

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	if (sm) {
		sme_alloc(current, false);
		if (!current->thread.sme_state)
			return -ENOMEM;
	}

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	if (sm) {
		current->thread.svcr |= SVCR_SM_MASK;
		set_thread_flag(TIF_SME);
	} else {
		current->thread.svcr &= ~SVCR_SM_MASK;
		set_thread_flag(TIF_SVE);
	}

	current->thread.fp_type = FP_STATE_SVE;

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	err = read_fpsimd_context(&fpsimd, user);
	if (err)
		return err;

	/* Merge the FPSIMD registers into the SVE state */
	fpsimd_update_current_state(&fpsimd);

	return 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
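/*
 * SME adds three more optional records: TPIDR2 (the register the SME
 * ABI uses for its lazy-save scheme), ZA (whose payload is present
 * only while PSTATE.ZA is set) and, with SME2, ZT0.
 */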
#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	u64 tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
	int err = 0;

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	sve_alloc(current, false);
	if (!current->thread.sve_state)
		return -ENOMEM;

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}
static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame;
	 * include it in the GCSPR_EL0 we report, to support stack
	 * switching via sigreturn. We do not allow enabling GCS via
	 * sigreturn, so the token is only relevant for threads that
	 * already have GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here; it will be
	 * validated later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */
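/*
 * Walk the records in uc_mcontext.__reserved[] (and any extra_context
 * continuation), noting where each known record lives. Parsing is
 * strictly validating: unknown magic values, duplicate records and
 * misaligned or out-of-bounds sizes all reject the whole frame.
 */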
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

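			/*
			 * extra->datap must point at the continuation
			 * area that starts right after this record's
			 * terminator; this is verified below before
			 * parsing switches to it.
			 */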
			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	fpsimd_save_and_flush_current_state();

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}
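/*
 * On sigreturn the cap token pushed by gcs_signal_entry() is consumed:
 * it must still match the current GCSPR_EL0, and it is zeroed before
 * GCSPR_EL0 is popped past it, so a signal frame cannot be replayed to
 * pivot the shadow stack.
 */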
#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	u64 gcspr_el0, cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS; read the cap.
	 * We don't enforce that this is in a GCS page: if it is not,
	 * then faults will be generated on GCS operations. The main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
			     sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the value read really is the cap token for this
	 * GCS location before consuming it.
	 */
	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state = {};

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
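/*
 * Frame generation is a two-pass process: setup_sigframe_layout()
 * decides which records are present and assigns their offsets, then
 * setup_sigframe() writes each record at its recorded offset.
 */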
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address,
			 &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}
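	/*
	 * Finally emit the EXTRA record (if the frame spilled out of
	 * __reserved[]) plus the terminators that delimit both the
	 * __reserved[] area and the extra data.
	 */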
	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
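/*
 * With GCS enabled, signal delivery pushes two entries onto the shadow
 * stack: the trampoline address, so the handler's return is checked
 * like any other return, and a cap token for the interrupted context,
 * which gcs_restore_signal() validates and consumes on sigreturn.
 */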
#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	u64 gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp,
		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 16;
	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;
	int err;

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	err = gcs_signal_entry(sigtramp, ksig);
	if (err)
		return err;

	/*
	 * We must not fail from this point onwards. We are going to update
	 * registers, including SP, in order to invoke the signal handler. If
	 * we failed and attempted to deliver a nested SIGSEGV to a handler
	 * after that point, the subsequent sigreturn would end up restoring
	 * the (partial) state for the original signal handler.
	 */

	regs->regs[0] = usig;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->regs[1] = (unsigned long)&user->sigframe->info;
		regs->regs[2] = (unsigned long)&user->sigframe->uc;
	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		task_smstop_sm(current);
		current->thread.svcr &= ~SVCR_ZA_MASK;
		write_sysreg_s(0, SYS_TPIDR2_EL0);
	}

	return 0;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state = {};
	int err = 0;

	fpsimd_save_and_flush_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	if (err == 0)
		err = setup_return(regs, ksig, &user, usig);

	/*
	 * We must not fail if setup_return() succeeded - see comment at the
	 * beginning of setup_return().
	 */

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}
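/*
 * A non-zero return from the frame setup paths below makes
 * signal_setup_done() force a SIGSEGV rather than running the handler
 * with a partially written frame.
 */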
/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void arch_do_signal_or_restart(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);