// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}
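
/*
 * The context records in uc.uc_mcontext.__reserved[] form a stream of
 * 16-byte aligned { magic, size } headers, terminated by a record with
 * magic == 0 and size == 0. Records that do not fit in __reserved[] are
 * placed in an "extra" area immediately after the main frame and are
 * reached via an extra_context record left in __reserved[].
 */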
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate. Stack alignment padding and the frame record are
 * not taken into account. This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K
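
/*
 * Allocate size bytes in the signal frame, expanding into the extra area
 * beyond __reserved[] if necessary. The first allocation that does not
 * fit allocates an extra_context record, keeps room for the __reserved[]
 * terminator and retries against the SIGFRAME_MAXSZ limit, so the
 * oversized record (and everything allocated after it) lands in the
 * extra area appended to the main frame.
 */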
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space? Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame. The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
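
/*
 * Restore SVE (or streaming mode SVE) register state from a sve_context
 * record, followed by the shared FPSIMD state. SVE_SIG_FLAG_SM marks the
 * record as streaming mode state, in which case the vector length is the
 * task's SME vector length rather than its SVE vector length.
 */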
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it can
		 * have an SVE formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
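
/*
 * SME state is spread over several records: tpidr2_context for TPIDR2_EL0,
 * za_context for the ZA matrix and zt_context for ZT0 on SME2. Streaming
 * mode SVE register state travels in the sve_context record with
 * SVE_SIG_FLAG_SM set.
 */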
#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
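
/*
 * Restore ZA from a za_context record. A record containing only the
 * header means ZA was disabled when the frame was built, in which case
 * ZA is simply turned off for the task.
 */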
static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
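
/*
 * When GCS is enabled, signal delivery pushes a cap token and the
 * trampoline address onto the task's Guarded Control Stack (see
 * gcs_signal_entry()). The GCSPR_EL0 value reported in the gcs_context
 * record points at that cap token, and gcs_restore_signal() validates
 * and consumes the token at sigreturn so a signal frame cannot be
 * returned through twice.
 */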
#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame,
	 * include it in the GCSPR_EL0 we report to support stack
	 * switching via sigreturn if GCS is enabled. We do not allow
	 * enabling via sigreturn so the token is only relevant for
	 * threads with GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here, we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;
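
		/*
		 * The sve_context record is also used for streaming mode
		 * SVE state, so it is accepted on SME-only systems too.
		 */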
		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
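
/*
 * For reference, a signal handler walks the same record stream from the
 * other side. A minimal userspace sketch (not kernel code, all error
 * checking omitted, where uc is the ucontext_t pointer passed to an
 * SA_SIGINFO handler) looks roughly like:
 *
 *	struct _aarch64_ctx *head =
 *		(struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 *
 *	while (head->magic != 0) {
 *		if (head->magic == EXTRA_MAGIC) {
 *			struct extra_context *extra = (void *)head;
 *
 *			head = (void *)(unsigned long)extra->datap;
 *			continue;
 *		}
 *		head = (void *)((char *)head + head->size);
 *	}
 */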
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	u64 gcspr_el0, cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
			     sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif
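
/*
 * sigreturn: unwind the signal frame pointed to by SP. The POR_EL0 value
 * recorded in the frame is only applied once all kernel accesses to the
 * frame are complete, so that a restrictive saved value cannot make
 * those accesses fault.
 */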
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
	 * be word aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
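
/*
 * Populate the signal frame laid out by setup_sigframe_layout(): the
 * frame record used for unwinding, the core register state, and one
 * context record for each feature that was assigned an offset in *user.
 */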
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	u64 gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp,
		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 16;
	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;
	int err;

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	err = gcs_signal_entry(sigtramp, ksig);
	if (err)
		return err;

	/*
	 * We must not fail from this point onwards. We are going to update
	 * registers, including SP, in order to invoke the signal handler. If
	 * we failed and attempted to deliver a nested SIGSEGV to a handler
	 * after that point, the subsequent sigreturn would end up restoring
	 * the (partial) state for the original signal handler.
	 */

	regs->regs[0] = usig;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->regs[1] = (unsigned long)&user->sigframe->info;
		regs->regs[2] = (unsigned long)&user->sigframe->uc;
	}
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->regs[30] = (unsigned long)sigtramp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	return 0;
}
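
/*
 * Build the complete rt signal frame on the (possibly alternate) signal
 * stack and point the task at the handler. A non-zero return is treated
 * as a failed delivery and the caller forces a SIGSEGV.
 */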
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&frame->info, &ksig->info);

	if (err == 0)
		err = setup_return(regs, ksig, &user, usig);

	/*
	 * We must not fail if setup_return() succeeded - see comment at the
	 * beginning of setup_return().
	 */

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);