// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#ifdef CONFIG_ARM64_GCS
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

static bool gcs_signal_cap_valid(u64 addr, u64 val)
{
	return val == GCS_SIGNAL_CAP(addr);
}
#endif

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

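/*
 * Reserve space for a record of @size bytes in the user signal frame.
 * Once the base __reserved[] area is exhausted, and only if @extend is
 * true, switch the layout over to an extra_context area appended after
 * the base frame (up to SIGFRAME_MAXSZ).
 */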
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		write_sysreg_s(por_el0, SYS_POR_EL0);

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

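/*
 * Restore SVE (or streaming-mode SVE) register state from the user
 * sve_context record, falling back to FPSIMD-only restore when the
 * record carries no register payload.
 */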
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
			       SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
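
/*
 * SME signal context: TPIDR2, ZA and (with SME2) ZT0 state are
 * presented to userspace as separate optional records.
 */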

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
			       ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
			       ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame,
	 * include it in the GCSPR_EL0 we report to support stack
	 * switching via sigreturn if GCS is enabled. We do not allow
	 * enabling via sigreturn so the token is only relevant for
	 * threads with GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling, disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here; we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user);

	return err;
}

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	unsigned long __user *gcspr_el0;
	u64 cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (__user void*)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}
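
	/* Guarded Control Stack registers, if enabled */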
	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0 && user->poe_offset) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	unsigned long __user *gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 2;
	write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;

	return gcs_signal_entry(sigtramp, ksig);
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		err = setup_return(regs, ksig, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);