// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	/* Reserve space for extension and terminator: */
	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

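	/*
	 * A frame is extended at most once: the !user->extra_offset
	 * check above guarantees that later allocations either fit
	 * under the raised limit or fail below.
	 */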
	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

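/*
 * SVE/SME state is exchanged with userspace through the sve_context
 * record. When the task has no live SVE state only the header is
 * written, and the header-only size tells the restore path to fall
 * back to the FPSIMD-only data.
 */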
#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

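/*
 * SME adds three further optional records: TPIDR2, the ZA matrix and
 * (with SME2) the ZT registers. ZA is validated against the task's
 * current SME vector length on restore; ZT has a fixed size and
 * requires ZA to be enabled.
 */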
#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

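/*
 * When PSTATE.ZA is clear only the za_context header is written; the
 * header-only size is what tells the restore path that ZA was
 * disabled when the signal was delivered.
 */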
static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

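/*
 * Walk the records in the __reserved[] area of the user sigcontext,
 * validating each header and recording where each known context type
 * lives, e.g.:
 *
 *	fpsimd_context | esr_context | sve_context | ... | terminator (0, 0)
 *
 * At most one EXTRA_MAGIC record may redirect the walk to an
 * out-of-line area for frames too large to fit in __reserved[].
 */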
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

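			/*
			 * The extra data must be contiguous with the
			 * standard frame, starting immediately after
			 * the dummy terminator checked above:
			 */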
			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	return err;
}

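/*
 * sys_rt_sigreturn() is the entry point back into the kernel from the
 * signal trampoline. A damaged or inaccessible frame is never reported
 * via the return value: the task gets a SIGSEGV for the faulting stack
 * pointer instead.
 */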
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * still be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	return sigframe_alloc_end(user);
}

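/*
 * Populate the frame laid out by setup_sigframe_layout(): the core
 * registers, the original signal mask and one record per live
 * extension, finishing with the terminator record.
 */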
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address,
			 &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

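/*
 * Prime the user register state for signal delivery: x0 carries the
 * signal number, sp and the frame pointer point into the freshly
 * written signal frame, pc is the handler entry point and lr the
 * sigreturn trampoline.
 */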
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

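/*
 * Entered from the exception return path with interrupts masked: loops
 * over the pending work flags, unmasking what each handler needs, until
 * no _TIF_WORK_MASK work remains.
 */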
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);