// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/gcs.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

#ifdef CONFIG_ARM64_GCS
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

static bool gcs_signal_cap_valid(u64 addr, u64 val)
{
	return val == GCS_SIGNAL_CAP(addr);
}
#endif

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long gcs_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long poe_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

/*
 * Holds any EL0-controlled state that influences unprivileged memory accesses.
 * This includes both accesses done in userspace and uaccess done in the kernel.
 *
 * This state needs to be carefully managed to ensure that it doesn't cause
 * uaccess to fail when setting up the signal frame, and the signal handler
 * itself also expects a well-defined state when entered.
 */
struct user_access_state {
	u64 por_el0;
};

#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

/*
 * Save the user access state into ua_state and reset it to disable any
 * restrictions.
 */
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
	if (system_supports_poe()) {
		u64 por_enable_all = 0;

		for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
			por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);

		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
		write_sysreg_s(por_enable_all, SYS_POR_EL0);
		/* Ensure that any subsequent uaccess observes the updated value */
		isb();
	}
}

/*
 * Set the user access state for invoking the signal handler.
 *
 * No uaccess should be done after that function is called.
 */
static void set_handler_user_access_state(void)
{
	if (system_supports_poe())
		write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}
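
/*
 * Note: these helpers are used in pairs by the signal paths later in this
 * file.  setup_rt_frame() calls save_reset_user_access_state() before
 * writing the frame, then either set_handler_user_access_state() on success
 * (no further uaccess is done before returning to the handler) or
 * restore_user_access_state() on failure.  sys_rt_sigreturn() restores the
 * state captured in the frame via restore_user_access_state() once the
 * frame has been read back.
 */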

/*
 * Restore the user access state to the values saved in ua_state.
 *
 * No uaccess should be done after that function is called.
 */
static void restore_user_access_state(const struct user_access_state *ua_state)
{
	if (system_supports_poe())
		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
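
/*
 * Illustration of the allocator above (descriptive only): records are laid
 * out in uc.uc_mcontext.__reserved[] until it runs out of space.  The first
 * allocation that does not fit causes an extra_context record (plus its
 * terminator slot) to be reserved in __reserved[], and user->limit is raised
 * to SIGFRAME_MAXSZ - TERMINATOR_SIZE so that this and any later records are
 * laid out beyond __reserved[] instead.  For example, a large SVE or ZA
 * record on a big-vector system typically ends up in that extra space, while
 * the fixed-size fpsimd/esr records normally stay in __reserved[].
 */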

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
	struct poe_context __user *poe;
	u32 poe_size;
	struct gcs_context __user *gcs;
	u32 gcs_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

static int preserve_poe_context(struct poe_context __user *ctx,
				const struct user_access_state *ua_state)
{
	int err = 0;

	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);

	return err;
}

static int restore_poe_context(struct user_ctxs *user,
			       struct user_access_state *ua_state)
{
	u64 por_el0;
	int err = 0;

	if (user->poe_size != sizeof(*user->poe))
		return -EINVAL;

	__get_user_error(por_el0, &(user->poe->por_el0), err);
	if (!err)
		ua_state->por_el0 = por_el0;

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it can
		 * have an SVE-formatted context with a zero VL and no
		 * payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_GCS

static int preserve_gcs_context(struct gcs_context __user *ctx)
{
	int err = 0;
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * If GCS is enabled we will add a cap token to the frame and
	 * include it in the GCSPR_EL0 we report, to support stack
	 * switching via sigreturn.  We do not allow enabling GCS via
	 * sigreturn, so the token is only relevant for threads with
	 * GCS enabled.
	 */
	if (task_gcs_el0_enabled(current))
		gcspr -= 8;

	__put_user_error(GCS_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(gcspr, &ctx->gcspr, err);
	__put_user_error(0, &ctx->reserved, err);
	__put_user_error(current->thread.gcs_el0_mode,
			 &ctx->features_enabled, err);

	return err;
}

static int restore_gcs_context(struct user_ctxs *user)
{
	u64 gcspr, enabled;
	int err = 0;

	if (user->gcs_size != sizeof(*user->gcs))
		return -EINVAL;

	__get_user_error(gcspr, &user->gcs->gcspr, err);
	__get_user_error(enabled, &user->gcs->features_enabled, err);
	if (err)
		return err;

	/* Don't allow unknown modes */
	if (enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	err = gcs_check_locked(current, enabled);
	if (err != 0)
		return err;

	/* Don't allow enabling */
	if (!task_gcs_el0_enabled(current) &&
	    (enabled & PR_SHADOW_STACK_ENABLE))
		return -EINVAL;

	/* If we are disabling disable everything */
	if (!(enabled & PR_SHADOW_STACK_ENABLE))
		enabled = 0;

	current->thread.gcs_el0_mode = enabled;

	/*
	 * We let userspace set GCSPR_EL0 to anything here, we will
	 * validate later in gcs_restore_signal().
	 */
	write_sysreg_s(gcspr, SYS_GCSPR_EL0);

	return 0;
}

#else /* ! CONFIG_ARM64_GCS */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_gcs_context(void __user *ctx);
extern int restore_gcs_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_GCS */

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;
	user->poe = NULL;
	user->gcs = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case POE_MAGIC:
			if (!system_supports_poe())
				goto invalid;

			if (user->poe)
				goto invalid;

			user->poe = (struct poe_context __user *)head;
			user->poe_size = size;
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case GCS_MAGIC:
			if (!system_supports_gcs())
				goto invalid;

			if (user->gcs)
				goto invalid;

			user->gcs = (struct gcs_context __user *)head;
			user->gcs_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
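
/*
 * For reference, the record chain parsed above is the same one a userspace
 * signal handler sees.  A minimal, illustrative (non-kernel) sketch of
 * walking it from a handler's ucontext_t, assuming the uapi definitions in
 * <asm/sigcontext.h>, would look roughly like:
 *
 *	struct _aarch64_ctx *head =
 *		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *	while (head->magic) {
 *		if (head->magic == EXTRA_MAGIC) {
 *			struct extra_context *extra = (void *)head;
 *
 *			head = (struct _aarch64_ctx *)(uintptr_t)extra->datap;
 *			continue;
 *		}
 *		// head->magic identifies the record (FPSIMD_MAGIC, ...)
 *		head = (void *)head + head->size;
 *	}
 *
 * The kernel-side parser above is stricter: it additionally checks
 * alignment, sizes against the enclosing buffer, duplicate records, and
 * that extra_context is followed only by a terminator in __reserved[].
 */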

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf,
			    struct user_access_state *ua_state)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_gcs() && user.gcs)
		err = restore_gcs_context(&user);

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	if (err == 0 && system_supports_poe() && user.poe)
		err = restore_poe_context(&user, ua_state);

	return err;
}

#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
	unsigned long __user *gcspr_el0;
	u64 cap;
	int ret;

	if (!system_supports_gcs())
		return 0;

	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
		return 0;

	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Ensure that any changes to the GCS done via GCS operations
	 * are visible to the normal reads we do to validate the
	 * token.
	 */
	gcsb_dsync();

	/*
	 * GCSPR_EL0 should be pointing at a capped GCS, read the cap.
	 * We don't enforce that this is in a GCS page, if it is not
	 * then faults will be generated on GCS operations - the main
	 * concern is to protect GCS pages.
	 */
	ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
	if (ret)
		return -EFAULT;

	/*
	 * Check that the cap is the actual GCS before replacing it.
	 */
	if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
		return -EINVAL;

	/* Invalidate the token to prevent reuse */
	put_user_gcs(0, (void __user *)gcspr_el0, &ret);
	if (ret != 0)
		return -EFAULT;

	write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);

	return 0;
}

#else
static int gcs_restore_signal(void) { return 0; }
#endif

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame, &ua_state))
		goto badframe;

	if (gcs_restore_signal())
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	restore_user_access_state(&ua_state);

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

#ifdef CONFIG_ARM64_GCS
	if (system_supports_gcs() && (add_all || current->thread.gcspr_el0)) {
		err = sigframe_alloc(user, &user->gcs_offset,
				     sizeof(struct gcs_context));
		if (err)
			return err;
	}
#endif

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	if (system_supports_poe()) {
		err = sigframe_alloc(user, &user->poe_offset,
				     sizeof(struct poe_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
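
/*
 * setup_sigframe_layout() only reserves offsets; setup_sigframe() below
 * writes the actual records at those offsets and must stay in sync with it.
 * Records that are not always present (esr, gcs, sve, za, zt, the
 * extra_context spill) are written only when a non-zero offset was reserved
 * for them here.
 */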

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set,
			  const struct user_access_state *ua_state)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	if (system_supports_gcs() && err == 0 && user->gcs_offset) {
		struct gcs_context __user *gcs_ctx =
			apply_user_offset(user, user->gcs_offset);
		err |= preserve_gcs_context(gcs_ctx);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	if (system_supports_poe() && err == 0) {
		struct poe_context __user *poe_ctx =
			apply_user_offset(user, user->poe_offset);

		err |= preserve_poe_context(poe_ctx, ua_state);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_ARM64_GCS

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	unsigned long __user *gcspr_el0;
	int ret = 0;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(current))
		return 0;

	/*
	 * We are entering a signal handler, current register state is
	 * active.
	 */
	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);

	/*
	 * Push a cap and the GCS entry for the trampoline onto the GCS.
	 */
	put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
	if (ret != 0)
		return ret;

	gcspr_el0 -= 2;
	write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);

	return 0;
}
#else

static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
	return 0;
}

#endif
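
/*
 * Shadow stack layout produced by gcs_signal_entry() above and consumed by
 * gcs_restore_signal() earlier in this file (slots are 8 bytes, the stack
 * grows down):
 *
 *	old GCSPR_EL0 - 8:	signal cap token, GCS_SIGNAL_CAP() of its address
 *	old GCSPR_EL0 - 16:	GCS entry for the sigtramp return address
 *
 * GCSPR_EL0 is moved down to the trampoline entry while the handler runs;
 * the GCS return through the trampoline consumes that entry, leaving
 * GCSPR_EL0 at the cap, which gcs_restore_signal() then validates,
 * invalidates and pops on sigreturn.
 */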

static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
			struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ksig->ka.sa.sa_flags & SA_RESTORER)
		sigtramp = ksig->ka.sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;

	return gcs_signal_entry(sigtramp, ksig);
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	struct user_access_state ua_state;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	save_reset_user_access_state(&ua_state);
	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set, &ua_state);
	if (err == 0) {
		err = setup_return(regs, ksig, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	if (err == 0)
		set_handler_user_access_state();
	else
		restore_user_access_state(&ua_state);

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);