// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K
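/*
 * Illustrative arithmetic for the scheme above (sizes taken from the
 * current uapi structs, so treat the numbers as examples rather than
 * ABI): struct sigcontext carries a 4096-byte __reserved[] area, and
 * TERMINATOR_SIZE and EXTRA_CONTEXT_SIZE round up to 16 and 32 bytes
 * respectively.  init_user_layout() therefore starts with
 *
 *	limit = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved)
 *		+ 4096 - 16 - 32
 *
 * so small records are carved straight out of __reserved[].  Only when
 * a record no longer fits does __sigframe_alloc() below emit an
 * extra_context record and raise the limit towards SIGFRAME_MAXSZ,
 * always holding TERMINATOR_SIZE back for the trailing null record.
 */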
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
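/*
 * For reference, userspace finds these records by walking the
 * __reserved[] area of the mcontext.  A minimal sketch (illustrative
 * only: a robust reader must also bounds-check and follow
 * extra_context, as parse_user_sigframe() below does for the kernel
 * side):
 *
 *	#include <asm/sigcontext.h>	// FPSIMD_MAGIC etc.
 *
 *	void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {
 *			if (head->magic == FPSIMD_MAGIC) {
 *				struct fpsimd_context *f = (void *)head;
 *				// f->vregs[], f->fpsr, f->fpcr ...
 *			}
 *			head = (struct _aarch64_ctx *)
 *				((char *)head + head->size);
 *		}
 *	}
 */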
#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
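/*
 * Size orientation for the SVE record (all derived from the uapi
 * macros): vq = sve_vq_from_vl(vl) counts 128-bit quadwords, so a
 * 512-bit vector length gives vl = 64 and vq = 4.  The payload then
 * holds 32 Z registers of vq * 16 bytes, 16 P registers of vq * 2
 * bytes, and FFR (vq * 2 bytes); preserve_sve_context() above rounds
 * the advertised head.size up to 16 bytes.
 */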
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised-out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		current->thread.tpidr2_el0 = tpidr2_el0;

	return err;
}
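/*
 * Size orientation for the ZA record: ZA is a two-dimensional
 * SVL x SVL byte array, so with vq = sve_vq_from_vl(vl) the payload is
 * ZA_SIG_REGS_SIZE(vq) = (vq * 16) * (vq * 16) bytes.  A 256-bit
 * streaming vector length (vq = 2), for example, dumps 1024 bytes of
 * ZA state into the frame.
 */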
static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised-out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
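/*
 * Quick map of the record types handled below (the authoritative
 * definitions live in the uapi sigcontext.h header):
 *
 *	FPSIMD_MAGIC	V0-V31, FPSR, FPCR (expected on all frames)
 *	ESR_MAGIC	fault ESR; informational, ignored on sigreturn
 *	SVE_MAGIC	Z/P/FFR registers, also used for streaming mode
 *	TPIDR2_MAGIC	TPIDR2_EL0
 *	ZA_MAGIC	ZA array (SVL x SVL bytes)
 *	ZT_MAGIC	ZT0 (512 bits, so ZT_SIG_REGS_SIZE(1) is 64)
 *	EXTRA_MAGIC	pointer to records that overflow __reserved[]
 */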
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
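/*
 * Once an extra_context record is present, the accepted stream shape is
 * (sketch; all sizes 16-byte multiples):
 *
 *	__reserved[]:	[ records... ][ extra_context ][ null terminator ]
 *	extra data:	[ more records...             ][ null terminator ]
 *
 * with extra->datap required to point immediately after the in-frame
 * terminator, and at most one extra_context accepted, so the walk in
 * parse_user_sigframe() above cannot loop.
 */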
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_sme() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
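/*
 * Because restore_sigframe() reloads every register from the frame, a
 * handler that edits the mcontext changes how the interrupted code
 * resumes.  An illustrative (not arm64-internal) use is skipping a
 * faulting 4-byte instruction from a SIGSEGV handler:
 *
 *	void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *
 *		uc->uc_mcontext.pc += 4;	// resume after the fault
 *	}
 *
 * Note that restore_sigframe() still rejects an invalid pstate via
 * valid_user_regs(), so privileged state cannot be smuggled in this
 * way.
 */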
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	return sigframe_alloc_end(user);
}
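/*
 * Worked example (sizes from the current uapi structs, for orientation
 * only): a plain FPSIMD task that took a fault lays out
 *
 *	fpsimd_context	528 bytes (head + fpsr/fpcr + 32 x 16-byte vregs)
 *	esr_context	 16 bytes
 *	terminator	 16 bytes
 *
 * which fits easily inside __reserved[]; in practice only large SVE/SME
 * vector lengths spill into an extra_context allocation.
 */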
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address,
			 &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_sme() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
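/*
 * Resulting stack picture (sketch; every boundary 16-byte aligned):
 *
 *	sp_top ->	original sp, or the alternate signal stack top
 *			[ padding                ]
 *			[ struct frame_record    ]  <- x29 will point here
 *			[ struct rt_sigframe     ]  <- new sp
 *
 * Publishing the frame record above the sigframe lets unwinders step
 * from the handler straight back to the interrupted context.
 */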
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
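/*
 * The x0-x2 assignments in setup_rt_frame() and setup_return() are what
 * make the three-argument handler convention work.  The matching
 * userspace setup is the ordinary POSIX API (illustrative):
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = handler,	// receives x0, x1, x2 above
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * Without SA_SIGINFO only x0, the signal number, is meaningful to the
 * handler.
 */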
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged; the value
	 * we report won't be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
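/*
 * Userspace consumes signal_minsigstksz through the auxiliary vector.
 * A sketch of sizing an alternate signal stack against it, using the
 * standard glibc/POSIX interfaces:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <sys/auxv.h>
 *
 *	size_t sz = getauxval(AT_MINSIGSTKSZ);
 *	if (sz < SIGSTKSZ)
 *		sz = SIGSTKSZ;
 *	stack_t ss = { .ss_sp = malloc(sz), .ss_size = sz };
 *	sigaltstack(&ss, NULL);	// check malloc() and this for errors
 *
 * On SVE/SME-capable systems AT_MINSIGSTKSZ can exceed the legacy
 * SIGSTKSZ constant, which is exactly why this value is exported.
 */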
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);