1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Based on arch/arm/kernel/ptrace.c 4 * 5 * By Ross Biro 1/23/92 6 * edited by Linus Torvalds 7 * ARM modifications Copyright (C) 2000 Russell King 8 * Copyright (C) 2012 ARM Ltd. 9 */ 10 11 #include <linux/audit.h> 12 #include <linux/compat.h> 13 #include <linux/kernel.h> 14 #include <linux/sched/signal.h> 15 #include <linux/sched/task_stack.h> 16 #include <linux/mm.h> 17 #include <linux/nospec.h> 18 #include <linux/smp.h> 19 #include <linux/ptrace.h> 20 #include <linux/user.h> 21 #include <linux/seccomp.h> 22 #include <linux/security.h> 23 #include <linux/init.h> 24 #include <linux/signal.h> 25 #include <linux/string.h> 26 #include <linux/uaccess.h> 27 #include <linux/perf_event.h> 28 #include <linux/hw_breakpoint.h> 29 #include <linux/regset.h> 30 #include <linux/elf.h> 31 #include <linux/rseq.h> 32 33 #include <asm/compat.h> 34 #include <asm/cpufeature.h> 35 #include <asm/debug-monitors.h> 36 #include <asm/fpsimd.h> 37 #include <asm/gcs.h> 38 #include <asm/mte.h> 39 #include <asm/pointer_auth.h> 40 #include <asm/stacktrace.h> 41 #include <asm/syscall.h> 42 #include <asm/traps.h> 43 #include <asm/system_misc.h> 44 45 #define CREATE_TRACE_POINTS 46 #include <trace/events/syscalls.h> 47 48 struct pt_regs_offset { 49 const char *name; 50 int offset; 51 }; 52 53 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 54 #define REG_OFFSET_END {.name = NULL, .offset = 0} 55 #define GPR_OFFSET_NAME(r) \ 56 {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} 57 58 static const struct pt_regs_offset regoffset_table[] = { 59 GPR_OFFSET_NAME(0), 60 GPR_OFFSET_NAME(1), 61 GPR_OFFSET_NAME(2), 62 GPR_OFFSET_NAME(3), 63 GPR_OFFSET_NAME(4), 64 GPR_OFFSET_NAME(5), 65 GPR_OFFSET_NAME(6), 66 GPR_OFFSET_NAME(7), 67 GPR_OFFSET_NAME(8), 68 GPR_OFFSET_NAME(9), 69 GPR_OFFSET_NAME(10), 70 GPR_OFFSET_NAME(11), 71 GPR_OFFSET_NAME(12), 72 GPR_OFFSET_NAME(13), 73 GPR_OFFSET_NAME(14), 74 GPR_OFFSET_NAME(15), 75 GPR_OFFSET_NAME(16), 76 GPR_OFFSET_NAME(17), 77 GPR_OFFSET_NAME(18), 78 GPR_OFFSET_NAME(19), 79 GPR_OFFSET_NAME(20), 80 GPR_OFFSET_NAME(21), 81 GPR_OFFSET_NAME(22), 82 GPR_OFFSET_NAME(23), 83 GPR_OFFSET_NAME(24), 84 GPR_OFFSET_NAME(25), 85 GPR_OFFSET_NAME(26), 86 GPR_OFFSET_NAME(27), 87 GPR_OFFSET_NAME(28), 88 GPR_OFFSET_NAME(29), 89 GPR_OFFSET_NAME(30), 90 {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, 91 REG_OFFSET_NAME(sp), 92 REG_OFFSET_NAME(pc), 93 REG_OFFSET_NAME(pstate), 94 REG_OFFSET_END, 95 }; 96 97 /** 98 * regs_query_register_offset() - query register offset from its name 99 * @name: the name of a register 100 * 101 * regs_query_register_offset() returns the offset of a register in struct 102 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 103 */ 104 int regs_query_register_offset(const char *name) 105 { 106 const struct pt_regs_offset *roff; 107 108 for (roff = regoffset_table; roff->name != NULL; roff++) 109 if (!strcmp(roff->name, name)) 110 return roff->offset; 111 return -EINVAL; 112 } 113 114 /** 115 * regs_within_kernel_stack() - check the address in the stack 116 * @regs: pt_regs which contains kernel stack pointer. 117 * @addr: address which is checked. 118 * 119 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 120 * If @addr is within the kernel stack, it returns true. If not, returns false. 121 */ 122 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 123 { 124 return ((addr & ~(THREAD_SIZE - 1)) == 125 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || 126 on_irq_stack(addr, sizeof(unsigned long)); 127 } 128 129 /** 130 * regs_get_kernel_stack_nth() - get Nth entry of the stack 131 * @regs: pt_regs which contains kernel stack pointer. 132 * @n: stack entry number. 133 * 134 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 135 * is specified by @regs. If the @n th entry is NOT in the kernel stack, 136 * this returns 0. 137 */ 138 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 139 { 140 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); 141 142 addr += n; 143 if (regs_within_kernel_stack(regs, (unsigned long)addr)) 144 return READ_ONCE_NOCHECK(*addr); 145 else 146 return 0; 147 } 148 149 /* 150 * TODO: does not yet catch signals sent when the child dies. 151 * in exit.c or in signal.c. 152 */ 153 154 /* 155 * Called by kernel/ptrace.c when detaching.. 156 */ 157 void ptrace_disable(struct task_struct *child) 158 { 159 /* 160 * This would be better off in core code, but PTRACE_DETACH has 161 * grown its fair share of arch-specific worts and changing it 162 * is likely to cause regressions on obscure architectures. 163 */ 164 user_disable_single_step(child); 165 } 166 167 #ifdef CONFIG_HAVE_HW_BREAKPOINT 168 /* 169 * Handle hitting a HW-breakpoint. 170 */ 171 static void ptrace_hbptriggered(struct perf_event *bp, 172 struct perf_sample_data *data, 173 struct pt_regs *regs) 174 { 175 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); 176 const char *desc = "Hardware breakpoint trap (ptrace)"; 177 178 if (is_compat_task()) { 179 int si_errno = 0; 180 int i; 181 182 for (i = 0; i < ARM_MAX_BRP; ++i) { 183 if (current->thread.debug.hbp_break[i] == bp) { 184 si_errno = (i << 1) + 1; 185 break; 186 } 187 } 188 189 for (i = 0; i < ARM_MAX_WRP; ++i) { 190 if (current->thread.debug.hbp_watch[i] == bp) { 191 si_errno = -((i << 1) + 1); 192 break; 193 } 194 } 195 arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, 196 desc); 197 return; 198 } 199 200 arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); 201 } 202 203 /* 204 * Unregister breakpoints from this task and reset the pointers in 205 * the thread_struct. 206 */ 207 void flush_ptrace_hw_breakpoint(struct task_struct *tsk) 208 { 209 int i; 210 struct thread_struct *t = &tsk->thread; 211 212 for (i = 0; i < ARM_MAX_BRP; i++) { 213 if (t->debug.hbp_break[i]) { 214 unregister_hw_breakpoint(t->debug.hbp_break[i]); 215 t->debug.hbp_break[i] = NULL; 216 } 217 } 218 219 for (i = 0; i < ARM_MAX_WRP; i++) { 220 if (t->debug.hbp_watch[i]) { 221 unregister_hw_breakpoint(t->debug.hbp_watch[i]); 222 t->debug.hbp_watch[i] = NULL; 223 } 224 } 225 } 226 227 void ptrace_hw_copy_thread(struct task_struct *tsk) 228 { 229 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); 230 } 231 232 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, 233 struct task_struct *tsk, 234 unsigned long idx) 235 { 236 struct perf_event *bp = ERR_PTR(-EINVAL); 237 238 switch (note_type) { 239 case NT_ARM_HW_BREAK: 240 if (idx >= ARM_MAX_BRP) 241 goto out; 242 idx = array_index_nospec(idx, ARM_MAX_BRP); 243 bp = tsk->thread.debug.hbp_break[idx]; 244 break; 245 case NT_ARM_HW_WATCH: 246 if (idx >= ARM_MAX_WRP) 247 goto out; 248 idx = array_index_nospec(idx, ARM_MAX_WRP); 249 bp = tsk->thread.debug.hbp_watch[idx]; 250 break; 251 } 252 253 out: 254 return bp; 255 } 256 257 static int ptrace_hbp_set_event(unsigned int note_type, 258 struct task_struct *tsk, 259 unsigned long idx, 260 struct perf_event *bp) 261 { 262 int err = -EINVAL; 263 264 switch (note_type) { 265 case NT_ARM_HW_BREAK: 266 if (idx >= ARM_MAX_BRP) 267 goto out; 268 idx = array_index_nospec(idx, ARM_MAX_BRP); 269 tsk->thread.debug.hbp_break[idx] = bp; 270 err = 0; 271 break; 272 case NT_ARM_HW_WATCH: 273 if (idx >= ARM_MAX_WRP) 274 goto out; 275 idx = array_index_nospec(idx, ARM_MAX_WRP); 276 tsk->thread.debug.hbp_watch[idx] = bp; 277 err = 0; 278 break; 279 } 280 281 out: 282 return err; 283 } 284 285 static struct perf_event *ptrace_hbp_create(unsigned int note_type, 286 struct task_struct *tsk, 287 unsigned long idx) 288 { 289 struct perf_event *bp; 290 struct perf_event_attr attr; 291 int err, type; 292 293 switch (note_type) { 294 case NT_ARM_HW_BREAK: 295 type = HW_BREAKPOINT_X; 296 break; 297 case NT_ARM_HW_WATCH: 298 type = HW_BREAKPOINT_RW; 299 break; 300 default: 301 return ERR_PTR(-EINVAL); 302 } 303 304 ptrace_breakpoint_init(&attr); 305 306 /* 307 * Initialise fields to sane defaults 308 * (i.e. values that will pass validation). 309 */ 310 attr.bp_addr = 0; 311 attr.bp_len = HW_BREAKPOINT_LEN_4; 312 attr.bp_type = type; 313 attr.disabled = 1; 314 315 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); 316 if (IS_ERR(bp)) 317 return bp; 318 319 err = ptrace_hbp_set_event(note_type, tsk, idx, bp); 320 if (err) 321 return ERR_PTR(err); 322 323 return bp; 324 } 325 326 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, 327 struct arch_hw_breakpoint_ctrl ctrl, 328 struct perf_event_attr *attr) 329 { 330 int err, len, type, offset, disabled = !ctrl.enabled; 331 332 attr->disabled = disabled; 333 if (disabled) 334 return 0; 335 336 err = arch_bp_generic_fields(ctrl, &len, &type, &offset); 337 if (err) 338 return err; 339 340 switch (note_type) { 341 case NT_ARM_HW_BREAK: 342 if ((type & HW_BREAKPOINT_X) != type) 343 return -EINVAL; 344 break; 345 case NT_ARM_HW_WATCH: 346 if ((type & HW_BREAKPOINT_RW) != type) 347 return -EINVAL; 348 break; 349 default: 350 return -EINVAL; 351 } 352 353 attr->bp_len = len; 354 attr->bp_type = type; 355 attr->bp_addr += offset; 356 357 return 0; 358 } 359 360 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) 361 { 362 u8 num; 363 u32 reg = 0; 364 365 switch (note_type) { 366 case NT_ARM_HW_BREAK: 367 num = hw_breakpoint_slots(TYPE_INST); 368 break; 369 case NT_ARM_HW_WATCH: 370 num = hw_breakpoint_slots(TYPE_DATA); 371 break; 372 default: 373 return -EINVAL; 374 } 375 376 reg |= debug_monitors_arch(); 377 reg <<= 8; 378 reg |= num; 379 380 *info = reg; 381 return 0; 382 } 383 384 static int ptrace_hbp_get_ctrl(unsigned int note_type, 385 struct task_struct *tsk, 386 unsigned long idx, 387 u32 *ctrl) 388 { 389 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 390 391 if (IS_ERR(bp)) 392 return PTR_ERR(bp); 393 394 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; 395 return 0; 396 } 397 398 static int ptrace_hbp_get_addr(unsigned int note_type, 399 struct task_struct *tsk, 400 unsigned long idx, 401 u64 *addr) 402 { 403 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 404 405 if (IS_ERR(bp)) 406 return PTR_ERR(bp); 407 408 *addr = bp ? counter_arch_bp(bp)->address : 0; 409 return 0; 410 } 411 412 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, 413 struct task_struct *tsk, 414 unsigned long idx) 415 { 416 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 417 418 if (!bp) 419 bp = ptrace_hbp_create(note_type, tsk, idx); 420 421 return bp; 422 } 423 424 static int ptrace_hbp_set_ctrl(unsigned int note_type, 425 struct task_struct *tsk, 426 unsigned long idx, 427 u32 uctrl) 428 { 429 int err; 430 struct perf_event *bp; 431 struct perf_event_attr attr; 432 struct arch_hw_breakpoint_ctrl ctrl; 433 434 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 435 if (IS_ERR(bp)) { 436 err = PTR_ERR(bp); 437 return err; 438 } 439 440 attr = bp->attr; 441 decode_ctrl_reg(uctrl, &ctrl); 442 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 443 if (err) 444 return err; 445 446 return modify_user_hw_breakpoint(bp, &attr); 447 } 448 449 static int ptrace_hbp_set_addr(unsigned int note_type, 450 struct task_struct *tsk, 451 unsigned long idx, 452 u64 addr) 453 { 454 int err; 455 struct perf_event *bp; 456 struct perf_event_attr attr; 457 458 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 459 if (IS_ERR(bp)) { 460 err = PTR_ERR(bp); 461 return err; 462 } 463 464 attr = bp->attr; 465 attr.bp_addr = addr; 466 err = modify_user_hw_breakpoint(bp, &attr); 467 return err; 468 } 469 470 #define PTRACE_HBP_ADDR_SZ sizeof(u64) 471 #define PTRACE_HBP_CTRL_SZ sizeof(u32) 472 #define PTRACE_HBP_PAD_SZ sizeof(u32) 473 474 static int hw_break_get(struct task_struct *target, 475 const struct user_regset *regset, 476 struct membuf to) 477 { 478 unsigned int note_type = regset->core_note_type; 479 int ret, idx = 0; 480 u32 info, ctrl; 481 u64 addr; 482 483 /* Resource info */ 484 ret = ptrace_hbp_get_resource_info(note_type, &info); 485 if (ret) 486 return ret; 487 488 membuf_write(&to, &info, sizeof(info)); 489 membuf_zero(&to, sizeof(u32)); 490 /* (address, ctrl) registers */ 491 while (to.left) { 492 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); 493 if (ret) 494 return ret; 495 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); 496 if (ret) 497 return ret; 498 membuf_store(&to, addr); 499 membuf_store(&to, ctrl); 500 membuf_zero(&to, sizeof(u32)); 501 idx++; 502 } 503 return 0; 504 } 505 506 static int hw_break_set(struct task_struct *target, 507 const struct user_regset *regset, 508 unsigned int pos, unsigned int count, 509 const void *kbuf, const void __user *ubuf) 510 { 511 unsigned int note_type = regset->core_note_type; 512 int ret, idx = 0, offset, limit; 513 u32 ctrl; 514 u64 addr; 515 516 /* Resource info and pad */ 517 offset = offsetof(struct user_hwdebug_state, dbg_regs); 518 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); 519 520 /* (address, ctrl) registers */ 521 limit = regset->n * regset->size; 522 while (count && offset < limit) { 523 if (count < PTRACE_HBP_ADDR_SZ) 524 return -EINVAL; 525 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 526 offset, offset + PTRACE_HBP_ADDR_SZ); 527 if (ret) 528 return ret; 529 ret = ptrace_hbp_set_addr(note_type, target, idx, addr); 530 if (ret) 531 return ret; 532 offset += PTRACE_HBP_ADDR_SZ; 533 534 if (!count) 535 break; 536 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 537 offset, offset + PTRACE_HBP_CTRL_SZ); 538 if (ret) 539 return ret; 540 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); 541 if (ret) 542 return ret; 543 offset += PTRACE_HBP_CTRL_SZ; 544 545 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 546 offset, offset + PTRACE_HBP_PAD_SZ); 547 offset += PTRACE_HBP_PAD_SZ; 548 idx++; 549 } 550 551 return 0; 552 } 553 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 554 555 static int gpr_get(struct task_struct *target, 556 const struct user_regset *regset, 557 struct membuf to) 558 { 559 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; 560 return membuf_write(&to, uregs, sizeof(*uregs)); 561 } 562 563 static int gpr_set(struct task_struct *target, const struct user_regset *regset, 564 unsigned int pos, unsigned int count, 565 const void *kbuf, const void __user *ubuf) 566 { 567 int ret; 568 struct user_pt_regs newregs = task_pt_regs(target)->user_regs; 569 570 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 571 if (ret) 572 return ret; 573 574 if (!valid_user_regs(&newregs, target)) 575 return -EINVAL; 576 577 task_pt_regs(target)->user_regs = newregs; 578 return 0; 579 } 580 581 static int fpr_active(struct task_struct *target, const struct user_regset *regset) 582 { 583 if (!system_supports_fpsimd()) 584 return -ENODEV; 585 return regset->n; 586 } 587 588 /* 589 * TODO: update fp accessors for lazy context switching (sync/flush hwstate) 590 */ 591 static int __fpr_get(struct task_struct *target, 592 const struct user_regset *regset, 593 struct membuf to) 594 { 595 struct user_fpsimd_state *uregs; 596 597 fpsimd_sync_from_effective_state(target); 598 599 uregs = &target->thread.uw.fpsimd_state; 600 601 return membuf_write(&to, uregs, sizeof(*uregs)); 602 } 603 604 static int fpr_get(struct task_struct *target, const struct user_regset *regset, 605 struct membuf to) 606 { 607 if (!system_supports_fpsimd()) 608 return -EINVAL; 609 610 if (target == current) 611 fpsimd_preserve_current_state(); 612 613 return __fpr_get(target, regset, to); 614 } 615 616 static int __fpr_set(struct task_struct *target, 617 const struct user_regset *regset, 618 unsigned int pos, unsigned int count, 619 const void *kbuf, const void __user *ubuf, 620 unsigned int start_pos) 621 { 622 int ret; 623 struct user_fpsimd_state newstate; 624 625 /* 626 * Ensure target->thread.uw.fpsimd_state is up to date, so that a 627 * short copyin can't resurrect stale data. 628 */ 629 fpsimd_sync_from_effective_state(target); 630 631 newstate = target->thread.uw.fpsimd_state; 632 633 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 634 start_pos, start_pos + sizeof(newstate)); 635 if (ret) 636 return ret; 637 638 target->thread.uw.fpsimd_state = newstate; 639 640 return ret; 641 } 642 643 static int fpr_set(struct task_struct *target, const struct user_regset *regset, 644 unsigned int pos, unsigned int count, 645 const void *kbuf, const void __user *ubuf) 646 { 647 int ret; 648 649 if (!system_supports_fpsimd()) 650 return -EINVAL; 651 652 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); 653 if (ret) 654 return ret; 655 656 fpsimd_sync_to_effective_state_zeropad(target); 657 fpsimd_flush_task_state(target); 658 659 return ret; 660 } 661 662 static int tls_get(struct task_struct *target, const struct user_regset *regset, 663 struct membuf to) 664 { 665 int ret; 666 667 if (target == current) 668 tls_preserve_current_state(); 669 670 ret = membuf_store(&to, target->thread.uw.tp_value); 671 if (system_supports_tpidr2()) 672 ret = membuf_store(&to, target->thread.tpidr2_el0); 673 else 674 ret = membuf_zero(&to, sizeof(u64)); 675 676 return ret; 677 } 678 679 static int tls_set(struct task_struct *target, const struct user_regset *regset, 680 unsigned int pos, unsigned int count, 681 const void *kbuf, const void __user *ubuf) 682 { 683 int ret; 684 unsigned long tls[2]; 685 686 tls[0] = target->thread.uw.tp_value; 687 if (system_supports_tpidr2()) 688 tls[1] = target->thread.tpidr2_el0; 689 690 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); 691 if (ret) 692 return ret; 693 694 target->thread.uw.tp_value = tls[0]; 695 if (system_supports_tpidr2()) 696 target->thread.tpidr2_el0 = tls[1]; 697 698 return ret; 699 } 700 701 static int fpmr_get(struct task_struct *target, const struct user_regset *regset, 702 struct membuf to) 703 { 704 if (!system_supports_fpmr()) 705 return -EINVAL; 706 707 if (target == current) 708 fpsimd_preserve_current_state(); 709 710 return membuf_store(&to, target->thread.uw.fpmr); 711 } 712 713 static int fpmr_set(struct task_struct *target, const struct user_regset *regset, 714 unsigned int pos, unsigned int count, 715 const void *kbuf, const void __user *ubuf) 716 { 717 int ret; 718 unsigned long fpmr; 719 720 if (!system_supports_fpmr()) 721 return -EINVAL; 722 723 fpmr = target->thread.uw.fpmr; 724 725 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count); 726 if (ret) 727 return ret; 728 729 target->thread.uw.fpmr = fpmr; 730 731 fpsimd_flush_task_state(target); 732 733 return 0; 734 } 735 736 static int system_call_get(struct task_struct *target, 737 const struct user_regset *regset, 738 struct membuf to) 739 { 740 return membuf_store(&to, task_pt_regs(target)->syscallno); 741 } 742 743 static int system_call_set(struct task_struct *target, 744 const struct user_regset *regset, 745 unsigned int pos, unsigned int count, 746 const void *kbuf, const void __user *ubuf) 747 { 748 int syscallno = task_pt_regs(target)->syscallno; 749 int ret; 750 751 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 752 if (ret) 753 return ret; 754 755 task_pt_regs(target)->syscallno = syscallno; 756 return ret; 757 } 758 759 #ifdef CONFIG_ARM64_SVE 760 761 static void sve_init_header_from_task(struct user_sve_header *header, 762 struct task_struct *target, 763 enum vec_type type) 764 { 765 unsigned int vq; 766 bool active; 767 enum vec_type task_type; 768 769 memset(header, 0, sizeof(*header)); 770 771 /* Check if the requested registers are active for the task */ 772 if (thread_sm_enabled(&target->thread)) 773 task_type = ARM64_VEC_SME; 774 else 775 task_type = ARM64_VEC_SVE; 776 active = (task_type == type); 777 778 if (active && target->thread.fp_type == FP_STATE_SVE) 779 header->flags = SVE_PT_REGS_SVE; 780 else 781 header->flags = SVE_PT_REGS_FPSIMD; 782 783 switch (type) { 784 case ARM64_VEC_SVE: 785 if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) 786 header->flags |= SVE_PT_VL_INHERIT; 787 break; 788 case ARM64_VEC_SME: 789 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 790 header->flags |= SVE_PT_VL_INHERIT; 791 break; 792 default: 793 WARN_ON_ONCE(1); 794 return; 795 } 796 797 header->vl = task_get_vl(target, type); 798 vq = sve_vq_from_vl(header->vl); 799 800 header->max_vl = vec_max_vl(type); 801 if (active) 802 header->size = SVE_PT_SIZE(vq, header->flags); 803 else 804 header->size = sizeof(header); 805 header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), 806 SVE_PT_REGS_SVE); 807 } 808 809 static unsigned int sve_size_from_header(struct user_sve_header const *header) 810 { 811 return ALIGN(header->size, SVE_VQ_BYTES); 812 } 813 814 static int sve_get_common(struct task_struct *target, 815 const struct user_regset *regset, 816 struct membuf to, 817 enum vec_type type) 818 { 819 struct user_sve_header header; 820 unsigned int vq; 821 unsigned long start, end; 822 823 if (target == current) 824 fpsimd_preserve_current_state(); 825 826 /* Header */ 827 sve_init_header_from_task(&header, target, type); 828 vq = sve_vq_from_vl(header.vl); 829 830 membuf_write(&to, &header, sizeof(header)); 831 832 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 833 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 834 835 /* 836 * When the requested vector type is not active, do not present data 837 * from the other mode to userspace. 838 */ 839 if (header.size == sizeof(header)) 840 return 0; 841 842 switch ((header.flags & SVE_PT_REGS_MASK)) { 843 case SVE_PT_REGS_FPSIMD: 844 return __fpr_get(target, regset, to); 845 846 case SVE_PT_REGS_SVE: 847 start = SVE_PT_SVE_OFFSET; 848 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 849 membuf_write(&to, target->thread.sve_state, end - start); 850 851 start = end; 852 end = SVE_PT_SVE_FPSR_OFFSET(vq); 853 membuf_zero(&to, end - start); 854 855 /* 856 * Copy fpsr, and fpcr which must follow contiguously in 857 * struct fpsimd_state: 858 */ 859 start = end; 860 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 861 membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, 862 end - start); 863 864 start = end; 865 end = sve_size_from_header(&header); 866 return membuf_zero(&to, end - start); 867 868 default: 869 BUILD_BUG(); 870 } 871 } 872 873 static int sve_get(struct task_struct *target, 874 const struct user_regset *regset, 875 struct membuf to) 876 { 877 if (!system_supports_sve()) 878 return -EINVAL; 879 880 return sve_get_common(target, regset, to, ARM64_VEC_SVE); 881 } 882 883 static int sve_set_common(struct task_struct *target, 884 const struct user_regset *regset, 885 unsigned int pos, unsigned int count, 886 const void *kbuf, const void __user *ubuf, 887 enum vec_type type) 888 { 889 int ret; 890 struct user_sve_header header; 891 unsigned int vq; 892 unsigned long start, end; 893 bool fpsimd; 894 895 fpsimd_flush_task_state(target); 896 897 /* Header */ 898 if (count < sizeof(header)) 899 return -EINVAL; 900 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 901 0, sizeof(header)); 902 if (ret) 903 return ret; 904 905 /* 906 * Streaming SVE data is always stored and presented in SVE format. 907 * Require the user to provide SVE formatted data for consistency, and 908 * to avoid the risk that we configure the task into an invalid state. 909 */ 910 fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD; 911 if (fpsimd && type == ARM64_VEC_SME) 912 return -EINVAL; 913 914 /* 915 * On systems without SVE we accept FPSIMD format writes with 916 * a VL of 0 to allow exiting streaming mode, otherwise a VL 917 * is required. 918 */ 919 if (header.vl) { 920 /* 921 * If the system does not support SVE we can't 922 * configure a SVE VL. 923 */ 924 if (!system_supports_sve() && type == ARM64_VEC_SVE) 925 return -EINVAL; 926 927 /* 928 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are 929 * consumed by vec_set_vector_length(), which will 930 * also validate them for us: 931 */ 932 ret = vec_set_vector_length(target, type, header.vl, 933 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); 934 if (ret) 935 return ret; 936 } else { 937 /* If the system supports SVE we require a VL. */ 938 if (system_supports_sve()) 939 return -EINVAL; 940 941 /* 942 * Only FPSIMD formatted data with no flags set is 943 * supported. 944 */ 945 if (header.flags != SVE_PT_REGS_FPSIMD) 946 return -EINVAL; 947 } 948 949 /* Allocate SME storage if necessary, preserving any existing ZA/ZT state */ 950 if (type == ARM64_VEC_SME) { 951 sme_alloc(target, false); 952 if (!target->thread.sme_state) 953 return -ENOMEM; 954 } 955 956 /* Allocate SVE storage if necessary, zeroing any existing SVE state */ 957 if (!fpsimd) { 958 sve_alloc(target, true); 959 if (!target->thread.sve_state) 960 return -ENOMEM; 961 } 962 963 /* 964 * Actual VL set may be different from what the user asked 965 * for, or we may have configured the _ONEXEC VL not the 966 * current VL: 967 */ 968 vq = sve_vq_from_vl(task_get_vl(target, type)); 969 970 /* Enter/exit streaming mode */ 971 switch (type) { 972 case ARM64_VEC_SVE: 973 target->thread.svcr &= ~SVCR_SM_MASK; 974 set_tsk_thread_flag(target, TIF_SVE); 975 break; 976 case ARM64_VEC_SME: 977 target->thread.svcr |= SVCR_SM_MASK; 978 set_tsk_thread_flag(target, TIF_SME); 979 break; 980 default: 981 WARN_ON_ONCE(1); 982 return -EINVAL; 983 } 984 985 /* Always zero V regs, FPSR, and FPCR */ 986 memset(¤t->thread.uw.fpsimd_state, 0, 987 sizeof(current->thread.uw.fpsimd_state)); 988 989 /* Registers: FPSIMD-only case */ 990 991 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 992 if (fpsimd) { 993 clear_tsk_thread_flag(target, TIF_SVE); 994 target->thread.fp_type = FP_STATE_FPSIMD; 995 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 996 SVE_PT_FPSIMD_OFFSET); 997 return ret; 998 } 999 1000 /* Otherwise: no registers or full SVE case. */ 1001 1002 target->thread.fp_type = FP_STATE_SVE; 1003 1004 /* 1005 * If setting a different VL from the requested VL and there is 1006 * register data, the data layout will be wrong: don't even 1007 * try to set the registers in this case. 1008 */ 1009 if (count && vq != sve_vq_from_vl(header.vl)) 1010 return -EIO; 1011 1012 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 1013 start = SVE_PT_SVE_OFFSET; 1014 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 1015 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1016 target->thread.sve_state, 1017 start, end); 1018 if (ret) 1019 return ret; 1020 1021 start = end; 1022 end = SVE_PT_SVE_FPSR_OFFSET(vq); 1023 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end); 1024 1025 /* 1026 * Copy fpsr, and fpcr which must follow contiguously in 1027 * struct fpsimd_state: 1028 */ 1029 start = end; 1030 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 1031 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1032 &target->thread.uw.fpsimd_state.fpsr, 1033 start, end); 1034 1035 return ret; 1036 } 1037 1038 static int sve_set(struct task_struct *target, 1039 const struct user_regset *regset, 1040 unsigned int pos, unsigned int count, 1041 const void *kbuf, const void __user *ubuf) 1042 { 1043 if (!system_supports_sve() && !system_supports_sme()) 1044 return -EINVAL; 1045 1046 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1047 ARM64_VEC_SVE); 1048 } 1049 1050 #endif /* CONFIG_ARM64_SVE */ 1051 1052 #ifdef CONFIG_ARM64_SME 1053 1054 static int ssve_get(struct task_struct *target, 1055 const struct user_regset *regset, 1056 struct membuf to) 1057 { 1058 if (!system_supports_sme()) 1059 return -EINVAL; 1060 1061 return sve_get_common(target, regset, to, ARM64_VEC_SME); 1062 } 1063 1064 static int ssve_set(struct task_struct *target, 1065 const struct user_regset *regset, 1066 unsigned int pos, unsigned int count, 1067 const void *kbuf, const void __user *ubuf) 1068 { 1069 if (!system_supports_sme()) 1070 return -EINVAL; 1071 1072 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1073 ARM64_VEC_SME); 1074 } 1075 1076 static int za_get(struct task_struct *target, 1077 const struct user_regset *regset, 1078 struct membuf to) 1079 { 1080 struct user_za_header header; 1081 unsigned int vq; 1082 unsigned long start, end; 1083 1084 if (!system_supports_sme()) 1085 return -EINVAL; 1086 1087 /* Header */ 1088 memset(&header, 0, sizeof(header)); 1089 1090 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 1091 header.flags |= ZA_PT_VL_INHERIT; 1092 1093 header.vl = task_get_sme_vl(target); 1094 vq = sve_vq_from_vl(header.vl); 1095 header.max_vl = sme_max_vl(); 1096 header.max_size = ZA_PT_SIZE(vq); 1097 1098 /* If ZA is not active there is only the header */ 1099 if (thread_za_enabled(&target->thread)) 1100 header.size = ZA_PT_SIZE(vq); 1101 else 1102 header.size = ZA_PT_ZA_OFFSET; 1103 1104 membuf_write(&to, &header, sizeof(header)); 1105 1106 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1107 end = ZA_PT_ZA_OFFSET; 1108 1109 if (target == current) 1110 fpsimd_preserve_current_state(); 1111 1112 /* Any register data to include? */ 1113 if (thread_za_enabled(&target->thread)) { 1114 start = end; 1115 end = ZA_PT_SIZE(vq); 1116 membuf_write(&to, target->thread.sme_state, end - start); 1117 } 1118 1119 /* Zero any trailing padding */ 1120 start = end; 1121 end = ALIGN(header.size, SVE_VQ_BYTES); 1122 return membuf_zero(&to, end - start); 1123 } 1124 1125 static int za_set(struct task_struct *target, 1126 const struct user_regset *regset, 1127 unsigned int pos, unsigned int count, 1128 const void *kbuf, const void __user *ubuf) 1129 { 1130 int ret; 1131 struct user_za_header header; 1132 unsigned int vq; 1133 unsigned long start, end; 1134 1135 if (!system_supports_sme()) 1136 return -EINVAL; 1137 1138 /* Header */ 1139 if (count < sizeof(header)) 1140 return -EINVAL; 1141 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 1142 0, sizeof(header)); 1143 if (ret) 1144 goto out; 1145 1146 /* 1147 * All current ZA_PT_* flags are consumed by 1148 * vec_set_vector_length(), which will also validate them for 1149 * us: 1150 */ 1151 ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, 1152 ((unsigned long)header.flags) << 16); 1153 if (ret) 1154 goto out; 1155 1156 /* 1157 * Actual VL set may be different from what the user asked 1158 * for, or we may have configured the _ONEXEC rather than 1159 * current VL: 1160 */ 1161 vq = sve_vq_from_vl(task_get_sme_vl(target)); 1162 1163 /* Ensure there is some SVE storage for streaming mode */ 1164 if (!target->thread.sve_state) { 1165 sve_alloc(target, false); 1166 if (!target->thread.sve_state) { 1167 ret = -ENOMEM; 1168 goto out; 1169 } 1170 } 1171 1172 /* 1173 * Only flush the storage if PSTATE.ZA was not already set, 1174 * otherwise preserve any existing data. 1175 */ 1176 sme_alloc(target, !thread_za_enabled(&target->thread)); 1177 if (!target->thread.sme_state) 1178 return -ENOMEM; 1179 1180 /* If there is no data then disable ZA */ 1181 if (!count) { 1182 target->thread.svcr &= ~SVCR_ZA_MASK; 1183 goto out; 1184 } 1185 1186 /* 1187 * If setting a different VL from the requested VL and there is 1188 * register data, the data layout will be wrong: don't even 1189 * try to set the registers in this case. 1190 */ 1191 if (vq != sve_vq_from_vl(header.vl)) { 1192 ret = -EIO; 1193 goto out; 1194 } 1195 1196 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1197 start = ZA_PT_ZA_OFFSET; 1198 end = ZA_PT_SIZE(vq); 1199 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1200 target->thread.sme_state, 1201 start, end); 1202 if (ret) 1203 goto out; 1204 1205 /* Mark ZA as active and let userspace use it */ 1206 set_tsk_thread_flag(target, TIF_SME); 1207 target->thread.svcr |= SVCR_ZA_MASK; 1208 1209 out: 1210 fpsimd_flush_task_state(target); 1211 return ret; 1212 } 1213 1214 static int zt_get(struct task_struct *target, 1215 const struct user_regset *regset, 1216 struct membuf to) 1217 { 1218 if (!system_supports_sme2()) 1219 return -EINVAL; 1220 1221 /* 1222 * If PSTATE.ZA is not set then ZT will be zeroed when it is 1223 * enabled so report the current register value as zero. 1224 */ 1225 if (thread_za_enabled(&target->thread)) 1226 membuf_write(&to, thread_zt_state(&target->thread), 1227 ZT_SIG_REG_BYTES); 1228 else 1229 membuf_zero(&to, ZT_SIG_REG_BYTES); 1230 1231 return 0; 1232 } 1233 1234 static int zt_set(struct task_struct *target, 1235 const struct user_regset *regset, 1236 unsigned int pos, unsigned int count, 1237 const void *kbuf, const void __user *ubuf) 1238 { 1239 int ret; 1240 1241 if (!system_supports_sme2()) 1242 return -EINVAL; 1243 1244 /* Ensure SVE storage in case this is first use of SME */ 1245 sve_alloc(target, false); 1246 if (!target->thread.sve_state) 1247 return -ENOMEM; 1248 1249 if (!thread_za_enabled(&target->thread)) { 1250 sme_alloc(target, true); 1251 if (!target->thread.sme_state) 1252 return -ENOMEM; 1253 } 1254 1255 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1256 thread_zt_state(&target->thread), 1257 0, ZT_SIG_REG_BYTES); 1258 if (ret == 0) { 1259 target->thread.svcr |= SVCR_ZA_MASK; 1260 set_tsk_thread_flag(target, TIF_SME); 1261 } 1262 1263 fpsimd_flush_task_state(target); 1264 1265 return ret; 1266 } 1267 1268 #endif /* CONFIG_ARM64_SME */ 1269 1270 #ifdef CONFIG_ARM64_PTR_AUTH 1271 static int pac_mask_get(struct task_struct *target, 1272 const struct user_regset *regset, 1273 struct membuf to) 1274 { 1275 /* 1276 * The PAC bits can differ across data and instruction pointers 1277 * depending on TCR_EL1.TBID*, which we may make use of in future, so 1278 * we expose separate masks. 1279 */ 1280 unsigned long mask = ptrauth_user_pac_mask(); 1281 struct user_pac_mask uregs = { 1282 .data_mask = mask, 1283 .insn_mask = mask, 1284 }; 1285 1286 if (!system_supports_address_auth()) 1287 return -EINVAL; 1288 1289 return membuf_write(&to, &uregs, sizeof(uregs)); 1290 } 1291 1292 static int pac_enabled_keys_get(struct task_struct *target, 1293 const struct user_regset *regset, 1294 struct membuf to) 1295 { 1296 long enabled_keys = ptrauth_get_enabled_keys(target); 1297 1298 if (IS_ERR_VALUE(enabled_keys)) 1299 return enabled_keys; 1300 1301 return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); 1302 } 1303 1304 static int pac_enabled_keys_set(struct task_struct *target, 1305 const struct user_regset *regset, 1306 unsigned int pos, unsigned int count, 1307 const void *kbuf, const void __user *ubuf) 1308 { 1309 int ret; 1310 long enabled_keys = ptrauth_get_enabled_keys(target); 1311 1312 if (IS_ERR_VALUE(enabled_keys)) 1313 return enabled_keys; 1314 1315 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, 1316 sizeof(long)); 1317 if (ret) 1318 return ret; 1319 1320 return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, 1321 enabled_keys); 1322 } 1323 1324 #ifdef CONFIG_CHECKPOINT_RESTORE 1325 static __uint128_t pac_key_to_user(const struct ptrauth_key *key) 1326 { 1327 return (__uint128_t)key->hi << 64 | key->lo; 1328 } 1329 1330 static struct ptrauth_key pac_key_from_user(__uint128_t ukey) 1331 { 1332 struct ptrauth_key key = { 1333 .lo = (unsigned long)ukey, 1334 .hi = (unsigned long)(ukey >> 64), 1335 }; 1336 1337 return key; 1338 } 1339 1340 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, 1341 const struct ptrauth_keys_user *keys) 1342 { 1343 ukeys->apiakey = pac_key_to_user(&keys->apia); 1344 ukeys->apibkey = pac_key_to_user(&keys->apib); 1345 ukeys->apdakey = pac_key_to_user(&keys->apda); 1346 ukeys->apdbkey = pac_key_to_user(&keys->apdb); 1347 } 1348 1349 static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, 1350 const struct user_pac_address_keys *ukeys) 1351 { 1352 keys->apia = pac_key_from_user(ukeys->apiakey); 1353 keys->apib = pac_key_from_user(ukeys->apibkey); 1354 keys->apda = pac_key_from_user(ukeys->apdakey); 1355 keys->apdb = pac_key_from_user(ukeys->apdbkey); 1356 } 1357 1358 static int pac_address_keys_get(struct task_struct *target, 1359 const struct user_regset *regset, 1360 struct membuf to) 1361 { 1362 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1363 struct user_pac_address_keys user_keys; 1364 1365 if (!system_supports_address_auth()) 1366 return -EINVAL; 1367 1368 pac_address_keys_to_user(&user_keys, keys); 1369 1370 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1371 } 1372 1373 static int pac_address_keys_set(struct task_struct *target, 1374 const struct user_regset *regset, 1375 unsigned int pos, unsigned int count, 1376 const void *kbuf, const void __user *ubuf) 1377 { 1378 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1379 struct user_pac_address_keys user_keys; 1380 int ret; 1381 1382 if (!system_supports_address_auth()) 1383 return -EINVAL; 1384 1385 pac_address_keys_to_user(&user_keys, keys); 1386 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1387 &user_keys, 0, -1); 1388 if (ret) 1389 return ret; 1390 pac_address_keys_from_user(keys, &user_keys); 1391 1392 return 0; 1393 } 1394 1395 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, 1396 const struct ptrauth_keys_user *keys) 1397 { 1398 ukeys->apgakey = pac_key_to_user(&keys->apga); 1399 } 1400 1401 static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, 1402 const struct user_pac_generic_keys *ukeys) 1403 { 1404 keys->apga = pac_key_from_user(ukeys->apgakey); 1405 } 1406 1407 static int pac_generic_keys_get(struct task_struct *target, 1408 const struct user_regset *regset, 1409 struct membuf to) 1410 { 1411 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1412 struct user_pac_generic_keys user_keys; 1413 1414 if (!system_supports_generic_auth()) 1415 return -EINVAL; 1416 1417 pac_generic_keys_to_user(&user_keys, keys); 1418 1419 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1420 } 1421 1422 static int pac_generic_keys_set(struct task_struct *target, 1423 const struct user_regset *regset, 1424 unsigned int pos, unsigned int count, 1425 const void *kbuf, const void __user *ubuf) 1426 { 1427 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1428 struct user_pac_generic_keys user_keys; 1429 int ret; 1430 1431 if (!system_supports_generic_auth()) 1432 return -EINVAL; 1433 1434 pac_generic_keys_to_user(&user_keys, keys); 1435 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1436 &user_keys, 0, -1); 1437 if (ret) 1438 return ret; 1439 pac_generic_keys_from_user(keys, &user_keys); 1440 1441 return 0; 1442 } 1443 #endif /* CONFIG_CHECKPOINT_RESTORE */ 1444 #endif /* CONFIG_ARM64_PTR_AUTH */ 1445 1446 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1447 static int tagged_addr_ctrl_get(struct task_struct *target, 1448 const struct user_regset *regset, 1449 struct membuf to) 1450 { 1451 long ctrl = get_tagged_addr_ctrl(target); 1452 1453 if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) 1454 return ctrl; 1455 1456 return membuf_write(&to, &ctrl, sizeof(ctrl)); 1457 } 1458 1459 static int tagged_addr_ctrl_set(struct task_struct *target, const struct 1460 user_regset *regset, unsigned int pos, 1461 unsigned int count, const void *kbuf, const 1462 void __user *ubuf) 1463 { 1464 int ret; 1465 long ctrl; 1466 1467 ctrl = get_tagged_addr_ctrl(target); 1468 if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) 1469 return ctrl; 1470 1471 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1472 if (ret) 1473 return ret; 1474 1475 return set_tagged_addr_ctrl(target, ctrl); 1476 } 1477 #endif 1478 1479 #ifdef CONFIG_ARM64_POE 1480 static int poe_get(struct task_struct *target, 1481 const struct user_regset *regset, 1482 struct membuf to) 1483 { 1484 if (!system_supports_poe()) 1485 return -EINVAL; 1486 1487 if (target == current) 1488 current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); 1489 1490 return membuf_write(&to, &target->thread.por_el0, 1491 sizeof(target->thread.por_el0)); 1492 } 1493 1494 static int poe_set(struct task_struct *target, const struct 1495 user_regset *regset, unsigned int pos, 1496 unsigned int count, const void *kbuf, const 1497 void __user *ubuf) 1498 { 1499 int ret; 1500 long ctrl; 1501 1502 if (!system_supports_poe()) 1503 return -EINVAL; 1504 1505 ctrl = target->thread.por_el0; 1506 1507 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1508 if (ret) 1509 return ret; 1510 1511 target->thread.por_el0 = ctrl; 1512 1513 return 0; 1514 } 1515 #endif 1516 1517 #ifdef CONFIG_ARM64_GCS 1518 static void task_gcs_to_user(struct user_gcs *user_gcs, 1519 const struct task_struct *target) 1520 { 1521 user_gcs->features_enabled = target->thread.gcs_el0_mode; 1522 user_gcs->features_locked = target->thread.gcs_el0_locked; 1523 user_gcs->gcspr_el0 = target->thread.gcspr_el0; 1524 } 1525 1526 static void task_gcs_from_user(struct task_struct *target, 1527 const struct user_gcs *user_gcs) 1528 { 1529 target->thread.gcs_el0_mode = user_gcs->features_enabled; 1530 target->thread.gcs_el0_locked = user_gcs->features_locked; 1531 target->thread.gcspr_el0 = user_gcs->gcspr_el0; 1532 } 1533 1534 static int gcs_get(struct task_struct *target, 1535 const struct user_regset *regset, 1536 struct membuf to) 1537 { 1538 struct user_gcs user_gcs; 1539 1540 if (!system_supports_gcs()) 1541 return -EINVAL; 1542 1543 if (target == current) 1544 gcs_preserve_current_state(); 1545 1546 task_gcs_to_user(&user_gcs, target); 1547 1548 return membuf_write(&to, &user_gcs, sizeof(user_gcs)); 1549 } 1550 1551 static int gcs_set(struct task_struct *target, const struct 1552 user_regset *regset, unsigned int pos, 1553 unsigned int count, const void *kbuf, const 1554 void __user *ubuf) 1555 { 1556 int ret; 1557 struct user_gcs user_gcs; 1558 1559 if (!system_supports_gcs()) 1560 return -EINVAL; 1561 1562 task_gcs_to_user(&user_gcs, target); 1563 1564 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1); 1565 if (ret) 1566 return ret; 1567 1568 if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK) 1569 return -EINVAL; 1570 1571 task_gcs_from_user(target, &user_gcs); 1572 1573 return 0; 1574 } 1575 #endif 1576 1577 enum aarch64_regset { 1578 REGSET_GPR, 1579 REGSET_FPR, 1580 REGSET_TLS, 1581 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1582 REGSET_HW_BREAK, 1583 REGSET_HW_WATCH, 1584 #endif 1585 REGSET_FPMR, 1586 REGSET_SYSTEM_CALL, 1587 #ifdef CONFIG_ARM64_SVE 1588 REGSET_SVE, 1589 #endif 1590 #ifdef CONFIG_ARM64_SME 1591 REGSET_SSVE, 1592 REGSET_ZA, 1593 REGSET_ZT, 1594 #endif 1595 #ifdef CONFIG_ARM64_PTR_AUTH 1596 REGSET_PAC_MASK, 1597 REGSET_PAC_ENABLED_KEYS, 1598 #ifdef CONFIG_CHECKPOINT_RESTORE 1599 REGSET_PACA_KEYS, 1600 REGSET_PACG_KEYS, 1601 #endif 1602 #endif 1603 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1604 REGSET_TAGGED_ADDR_CTRL, 1605 #endif 1606 #ifdef CONFIG_ARM64_POE 1607 REGSET_POE, 1608 #endif 1609 #ifdef CONFIG_ARM64_GCS 1610 REGSET_GCS, 1611 #endif 1612 }; 1613 1614 static const struct user_regset aarch64_regsets[] = { 1615 [REGSET_GPR] = { 1616 USER_REGSET_NOTE_TYPE(PRSTATUS), 1617 .n = sizeof(struct user_pt_regs) / sizeof(u64), 1618 .size = sizeof(u64), 1619 .align = sizeof(u64), 1620 .regset_get = gpr_get, 1621 .set = gpr_set 1622 }, 1623 [REGSET_FPR] = { 1624 USER_REGSET_NOTE_TYPE(PRFPREG), 1625 .n = sizeof(struct user_fpsimd_state) / sizeof(u32), 1626 /* 1627 * We pretend we have 32-bit registers because the fpsr and 1628 * fpcr are 32-bits wide. 1629 */ 1630 .size = sizeof(u32), 1631 .align = sizeof(u32), 1632 .active = fpr_active, 1633 .regset_get = fpr_get, 1634 .set = fpr_set 1635 }, 1636 [REGSET_TLS] = { 1637 USER_REGSET_NOTE_TYPE(ARM_TLS), 1638 .n = 2, 1639 .size = sizeof(void *), 1640 .align = sizeof(void *), 1641 .regset_get = tls_get, 1642 .set = tls_set, 1643 }, 1644 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1645 [REGSET_HW_BREAK] = { 1646 USER_REGSET_NOTE_TYPE(ARM_HW_BREAK), 1647 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1648 .size = sizeof(u32), 1649 .align = sizeof(u32), 1650 .regset_get = hw_break_get, 1651 .set = hw_break_set, 1652 }, 1653 [REGSET_HW_WATCH] = { 1654 USER_REGSET_NOTE_TYPE(ARM_HW_WATCH), 1655 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1656 .size = sizeof(u32), 1657 .align = sizeof(u32), 1658 .regset_get = hw_break_get, 1659 .set = hw_break_set, 1660 }, 1661 #endif 1662 [REGSET_SYSTEM_CALL] = { 1663 USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL), 1664 .n = 1, 1665 .size = sizeof(int), 1666 .align = sizeof(int), 1667 .regset_get = system_call_get, 1668 .set = system_call_set, 1669 }, 1670 [REGSET_FPMR] = { 1671 USER_REGSET_NOTE_TYPE(ARM_FPMR), 1672 .n = 1, 1673 .size = sizeof(u64), 1674 .align = sizeof(u64), 1675 .regset_get = fpmr_get, 1676 .set = fpmr_set, 1677 }, 1678 #ifdef CONFIG_ARM64_SVE 1679 [REGSET_SVE] = { /* Scalable Vector Extension */ 1680 USER_REGSET_NOTE_TYPE(ARM_SVE), 1681 .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX, 1682 SVE_PT_REGS_SVE), 1683 SVE_VQ_BYTES), 1684 .size = SVE_VQ_BYTES, 1685 .align = SVE_VQ_BYTES, 1686 .regset_get = sve_get, 1687 .set = sve_set, 1688 }, 1689 #endif 1690 #ifdef CONFIG_ARM64_SME 1691 [REGSET_SSVE] = { /* Streaming mode SVE */ 1692 USER_REGSET_NOTE_TYPE(ARM_SSVE), 1693 .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), 1694 SVE_VQ_BYTES), 1695 .size = SVE_VQ_BYTES, 1696 .align = SVE_VQ_BYTES, 1697 .regset_get = ssve_get, 1698 .set = ssve_set, 1699 }, 1700 [REGSET_ZA] = { /* SME ZA */ 1701 USER_REGSET_NOTE_TYPE(ARM_ZA), 1702 /* 1703 * ZA is a single register but it's variably sized and 1704 * the ptrace core requires that the size of any data 1705 * be an exact multiple of the configured register 1706 * size so report as though we had SVE_VQ_BYTES 1707 * registers. These values aren't exposed to 1708 * userspace. 1709 */ 1710 .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), 1711 .size = SVE_VQ_BYTES, 1712 .align = SVE_VQ_BYTES, 1713 .regset_get = za_get, 1714 .set = za_set, 1715 }, 1716 [REGSET_ZT] = { /* SME ZT */ 1717 USER_REGSET_NOTE_TYPE(ARM_ZT), 1718 .n = 1, 1719 .size = ZT_SIG_REG_BYTES, 1720 .align = sizeof(u64), 1721 .regset_get = zt_get, 1722 .set = zt_set, 1723 }, 1724 #endif 1725 #ifdef CONFIG_ARM64_PTR_AUTH 1726 [REGSET_PAC_MASK] = { 1727 USER_REGSET_NOTE_TYPE(ARM_PAC_MASK), 1728 .n = sizeof(struct user_pac_mask) / sizeof(u64), 1729 .size = sizeof(u64), 1730 .align = sizeof(u64), 1731 .regset_get = pac_mask_get, 1732 /* this cannot be set dynamically */ 1733 }, 1734 [REGSET_PAC_ENABLED_KEYS] = { 1735 USER_REGSET_NOTE_TYPE(ARM_PAC_ENABLED_KEYS), 1736 .n = 1, 1737 .size = sizeof(long), 1738 .align = sizeof(long), 1739 .regset_get = pac_enabled_keys_get, 1740 .set = pac_enabled_keys_set, 1741 }, 1742 #ifdef CONFIG_CHECKPOINT_RESTORE 1743 [REGSET_PACA_KEYS] = { 1744 USER_REGSET_NOTE_TYPE(ARM_PACA_KEYS), 1745 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), 1746 .size = sizeof(__uint128_t), 1747 .align = sizeof(__uint128_t), 1748 .regset_get = pac_address_keys_get, 1749 .set = pac_address_keys_set, 1750 }, 1751 [REGSET_PACG_KEYS] = { 1752 USER_REGSET_NOTE_TYPE(ARM_PACG_KEYS), 1753 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), 1754 .size = sizeof(__uint128_t), 1755 .align = sizeof(__uint128_t), 1756 .regset_get = pac_generic_keys_get, 1757 .set = pac_generic_keys_set, 1758 }, 1759 #endif 1760 #endif 1761 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1762 [REGSET_TAGGED_ADDR_CTRL] = { 1763 USER_REGSET_NOTE_TYPE(ARM_TAGGED_ADDR_CTRL), 1764 .n = 1, 1765 .size = sizeof(long), 1766 .align = sizeof(long), 1767 .regset_get = tagged_addr_ctrl_get, 1768 .set = tagged_addr_ctrl_set, 1769 }, 1770 #endif 1771 #ifdef CONFIG_ARM64_POE 1772 [REGSET_POE] = { 1773 USER_REGSET_NOTE_TYPE(ARM_POE), 1774 .n = 1, 1775 .size = sizeof(long), 1776 .align = sizeof(long), 1777 .regset_get = poe_get, 1778 .set = poe_set, 1779 }, 1780 #endif 1781 #ifdef CONFIG_ARM64_GCS 1782 [REGSET_GCS] = { 1783 USER_REGSET_NOTE_TYPE(ARM_GCS), 1784 .n = sizeof(struct user_gcs) / sizeof(u64), 1785 .size = sizeof(u64), 1786 .align = sizeof(u64), 1787 .regset_get = gcs_get, 1788 .set = gcs_set, 1789 }, 1790 #endif 1791 }; 1792 1793 static const struct user_regset_view user_aarch64_view = { 1794 .name = "aarch64", .e_machine = EM_AARCH64, 1795 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) 1796 }; 1797 1798 enum compat_regset { 1799 REGSET_COMPAT_GPR, 1800 REGSET_COMPAT_VFP, 1801 }; 1802 1803 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) 1804 { 1805 struct pt_regs *regs = task_pt_regs(task); 1806 1807 switch (idx) { 1808 case 15: 1809 return regs->pc; 1810 case 16: 1811 return pstate_to_compat_psr(regs->pstate); 1812 case 17: 1813 return regs->orig_x0; 1814 default: 1815 return regs->regs[idx]; 1816 } 1817 } 1818 1819 static int compat_gpr_get(struct task_struct *target, 1820 const struct user_regset *regset, 1821 struct membuf to) 1822 { 1823 int i = 0; 1824 1825 while (to.left) 1826 membuf_store(&to, compat_get_user_reg(target, i++)); 1827 return 0; 1828 } 1829 1830 static int compat_gpr_set(struct task_struct *target, 1831 const struct user_regset *regset, 1832 unsigned int pos, unsigned int count, 1833 const void *kbuf, const void __user *ubuf) 1834 { 1835 struct pt_regs newregs; 1836 int ret = 0; 1837 unsigned int i, start, num_regs; 1838 1839 /* Calculate the number of AArch32 registers contained in count */ 1840 num_regs = count / regset->size; 1841 1842 /* Convert pos into an register number */ 1843 start = pos / regset->size; 1844 1845 if (start + num_regs > regset->n) 1846 return -EIO; 1847 1848 newregs = *task_pt_regs(target); 1849 1850 for (i = 0; i < num_regs; ++i) { 1851 unsigned int idx = start + i; 1852 compat_ulong_t reg; 1853 1854 if (kbuf) { 1855 memcpy(®, kbuf, sizeof(reg)); 1856 kbuf += sizeof(reg); 1857 } else { 1858 ret = copy_from_user(®, ubuf, sizeof(reg)); 1859 if (ret) { 1860 ret = -EFAULT; 1861 break; 1862 } 1863 1864 ubuf += sizeof(reg); 1865 } 1866 1867 switch (idx) { 1868 case 15: 1869 newregs.pc = reg; 1870 break; 1871 case 16: 1872 reg = compat_psr_to_pstate(reg); 1873 newregs.pstate = reg; 1874 break; 1875 case 17: 1876 newregs.orig_x0 = reg; 1877 break; 1878 default: 1879 newregs.regs[idx] = reg; 1880 } 1881 1882 } 1883 1884 if (valid_user_regs(&newregs.user_regs, target)) 1885 *task_pt_regs(target) = newregs; 1886 else 1887 ret = -EINVAL; 1888 1889 return ret; 1890 } 1891 1892 static int compat_vfp_get(struct task_struct *target, 1893 const struct user_regset *regset, 1894 struct membuf to) 1895 { 1896 struct user_fpsimd_state *uregs; 1897 compat_ulong_t fpscr; 1898 1899 if (!system_supports_fpsimd()) 1900 return -EINVAL; 1901 1902 uregs = &target->thread.uw.fpsimd_state; 1903 1904 if (target == current) 1905 fpsimd_preserve_current_state(); 1906 1907 /* 1908 * The VFP registers are packed into the fpsimd_state, so they all sit 1909 * nicely together for us. We just need to create the fpscr separately. 1910 */ 1911 membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t)); 1912 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | 1913 (uregs->fpcr & VFP_FPSCR_CTRL_MASK); 1914 return membuf_store(&to, fpscr); 1915 } 1916 1917 static int compat_vfp_set(struct task_struct *target, 1918 const struct user_regset *regset, 1919 unsigned int pos, unsigned int count, 1920 const void *kbuf, const void __user *ubuf) 1921 { 1922 struct user_fpsimd_state *uregs; 1923 compat_ulong_t fpscr; 1924 int ret, vregs_end_pos; 1925 1926 if (!system_supports_fpsimd()) 1927 return -EINVAL; 1928 1929 uregs = &target->thread.uw.fpsimd_state; 1930 1931 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); 1932 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, 1933 vregs_end_pos); 1934 1935 if (count && !ret) { 1936 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, 1937 vregs_end_pos, VFP_STATE_SIZE); 1938 if (!ret) { 1939 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; 1940 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; 1941 } 1942 } 1943 1944 fpsimd_flush_task_state(target); 1945 return ret; 1946 } 1947 1948 static int compat_tls_get(struct task_struct *target, 1949 const struct user_regset *regset, 1950 struct membuf to) 1951 { 1952 return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value); 1953 } 1954 1955 static int compat_tls_set(struct task_struct *target, 1956 const struct user_regset *regset, unsigned int pos, 1957 unsigned int count, const void *kbuf, 1958 const void __user *ubuf) 1959 { 1960 int ret; 1961 compat_ulong_t tls = target->thread.uw.tp_value; 1962 1963 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 1964 if (ret) 1965 return ret; 1966 1967 target->thread.uw.tp_value = tls; 1968 return ret; 1969 } 1970 1971 static const struct user_regset aarch32_regsets[] = { 1972 [REGSET_COMPAT_GPR] = { 1973 USER_REGSET_NOTE_TYPE(PRSTATUS), 1974 .n = COMPAT_ELF_NGREG, 1975 .size = sizeof(compat_elf_greg_t), 1976 .align = sizeof(compat_elf_greg_t), 1977 .regset_get = compat_gpr_get, 1978 .set = compat_gpr_set 1979 }, 1980 [REGSET_COMPAT_VFP] = { 1981 USER_REGSET_NOTE_TYPE(ARM_VFP), 1982 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 1983 .size = sizeof(compat_ulong_t), 1984 .align = sizeof(compat_ulong_t), 1985 .active = fpr_active, 1986 .regset_get = compat_vfp_get, 1987 .set = compat_vfp_set 1988 }, 1989 }; 1990 1991 static const struct user_regset_view user_aarch32_view = { 1992 .name = "aarch32", .e_machine = EM_ARM, 1993 .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) 1994 }; 1995 1996 static const struct user_regset aarch32_ptrace_regsets[] = { 1997 [REGSET_GPR] = { 1998 USER_REGSET_NOTE_TYPE(PRSTATUS), 1999 .n = COMPAT_ELF_NGREG, 2000 .size = sizeof(compat_elf_greg_t), 2001 .align = sizeof(compat_elf_greg_t), 2002 .regset_get = compat_gpr_get, 2003 .set = compat_gpr_set 2004 }, 2005 [REGSET_FPR] = { 2006 USER_REGSET_NOTE_TYPE(ARM_VFP), 2007 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 2008 .size = sizeof(compat_ulong_t), 2009 .align = sizeof(compat_ulong_t), 2010 .regset_get = compat_vfp_get, 2011 .set = compat_vfp_set 2012 }, 2013 [REGSET_TLS] = { 2014 USER_REGSET_NOTE_TYPE(ARM_TLS), 2015 .n = 1, 2016 .size = sizeof(compat_ulong_t), 2017 .align = sizeof(compat_ulong_t), 2018 .regset_get = compat_tls_get, 2019 .set = compat_tls_set, 2020 }, 2021 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2022 [REGSET_HW_BREAK] = { 2023 USER_REGSET_NOTE_TYPE(ARM_HW_BREAK), 2024 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 2025 .size = sizeof(u32), 2026 .align = sizeof(u32), 2027 .regset_get = hw_break_get, 2028 .set = hw_break_set, 2029 }, 2030 [REGSET_HW_WATCH] = { 2031 USER_REGSET_NOTE_TYPE(ARM_HW_WATCH), 2032 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 2033 .size = sizeof(u32), 2034 .align = sizeof(u32), 2035 .regset_get = hw_break_get, 2036 .set = hw_break_set, 2037 }, 2038 #endif 2039 [REGSET_SYSTEM_CALL] = { 2040 USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL), 2041 .n = 1, 2042 .size = sizeof(int), 2043 .align = sizeof(int), 2044 .regset_get = system_call_get, 2045 .set = system_call_set, 2046 }, 2047 }; 2048 2049 static const struct user_regset_view user_aarch32_ptrace_view = { 2050 .name = "aarch32", .e_machine = EM_ARM, 2051 .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) 2052 }; 2053 2054 #ifdef CONFIG_COMPAT 2055 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, 2056 compat_ulong_t __user *ret) 2057 { 2058 compat_ulong_t tmp; 2059 2060 if (off & 3) 2061 return -EIO; 2062 2063 if (off == COMPAT_PT_TEXT_ADDR) 2064 tmp = tsk->mm->start_code; 2065 else if (off == COMPAT_PT_DATA_ADDR) 2066 tmp = tsk->mm->start_data; 2067 else if (off == COMPAT_PT_TEXT_END_ADDR) 2068 tmp = tsk->mm->end_code; 2069 else if (off < sizeof(compat_elf_gregset_t)) 2070 tmp = compat_get_user_reg(tsk, off >> 2); 2071 else if (off >= COMPAT_USER_SZ) 2072 return -EIO; 2073 else 2074 tmp = 0; 2075 2076 return put_user(tmp, ret); 2077 } 2078 2079 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, 2080 compat_ulong_t val) 2081 { 2082 struct pt_regs newregs = *task_pt_regs(tsk); 2083 unsigned int idx = off / 4; 2084 2085 if (off & 3 || off >= COMPAT_USER_SZ) 2086 return -EIO; 2087 2088 if (off >= sizeof(compat_elf_gregset_t)) 2089 return 0; 2090 2091 switch (idx) { 2092 case 15: 2093 newregs.pc = val; 2094 break; 2095 case 16: 2096 newregs.pstate = compat_psr_to_pstate(val); 2097 break; 2098 case 17: 2099 newregs.orig_x0 = val; 2100 break; 2101 default: 2102 newregs.regs[idx] = val; 2103 } 2104 2105 if (!valid_user_regs(&newregs.user_regs, tsk)) 2106 return -EINVAL; 2107 2108 *task_pt_regs(tsk) = newregs; 2109 return 0; 2110 } 2111 2112 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2113 2114 /* 2115 * Convert a virtual register number into an index for a thread_info 2116 * breakpoint array. Breakpoints are identified using positive numbers 2117 * whilst watchpoints are negative. The registers are laid out as pairs 2118 * of (address, control), each pair mapping to a unique hw_breakpoint struct. 2119 * Register 0 is reserved for describing resource information. 2120 */ 2121 static int compat_ptrace_hbp_num_to_idx(compat_long_t num) 2122 { 2123 return (abs(num) - 1) >> 1; 2124 } 2125 2126 static int compat_ptrace_hbp_get_resource_info(u32 *kdata) 2127 { 2128 u8 num_brps, num_wrps, debug_arch, wp_len; 2129 u32 reg = 0; 2130 2131 num_brps = hw_breakpoint_slots(TYPE_INST); 2132 num_wrps = hw_breakpoint_slots(TYPE_DATA); 2133 2134 debug_arch = debug_monitors_arch(); 2135 wp_len = 8; 2136 reg |= debug_arch; 2137 reg <<= 8; 2138 reg |= wp_len; 2139 reg <<= 8; 2140 reg |= num_wrps; 2141 reg <<= 8; 2142 reg |= num_brps; 2143 2144 *kdata = reg; 2145 return 0; 2146 } 2147 2148 static int compat_ptrace_hbp_get(unsigned int note_type, 2149 struct task_struct *tsk, 2150 compat_long_t num, 2151 u32 *kdata) 2152 { 2153 u64 addr = 0; 2154 u32 ctrl = 0; 2155 2156 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2157 2158 if (num & 1) { 2159 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); 2160 *kdata = (u32)addr; 2161 } else { 2162 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); 2163 *kdata = ctrl; 2164 } 2165 2166 return err; 2167 } 2168 2169 static int compat_ptrace_hbp_set(unsigned int note_type, 2170 struct task_struct *tsk, 2171 compat_long_t num, 2172 u32 *kdata) 2173 { 2174 u64 addr; 2175 u32 ctrl; 2176 2177 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2178 2179 if (num & 1) { 2180 addr = *kdata; 2181 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); 2182 } else { 2183 ctrl = *kdata; 2184 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); 2185 } 2186 2187 return err; 2188 } 2189 2190 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, 2191 compat_ulong_t __user *data) 2192 { 2193 int ret; 2194 u32 kdata; 2195 2196 /* Watchpoint */ 2197 if (num < 0) { 2198 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); 2199 /* Resource info */ 2200 } else if (num == 0) { 2201 ret = compat_ptrace_hbp_get_resource_info(&kdata); 2202 /* Breakpoint */ 2203 } else { 2204 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 2205 } 2206 2207 if (!ret) 2208 ret = put_user(kdata, data); 2209 2210 return ret; 2211 } 2212 2213 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, 2214 compat_ulong_t __user *data) 2215 { 2216 int ret; 2217 u32 kdata = 0; 2218 2219 if (num == 0) 2220 return 0; 2221 2222 ret = get_user(kdata, data); 2223 if (ret) 2224 return ret; 2225 2226 if (num < 0) 2227 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 2228 else 2229 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 2230 2231 return ret; 2232 } 2233 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 2234 2235 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 2236 compat_ulong_t caddr, compat_ulong_t cdata) 2237 { 2238 unsigned long addr = caddr; 2239 unsigned long data = cdata; 2240 void __user *datap = compat_ptr(data); 2241 int ret; 2242 2243 switch (request) { 2244 case PTRACE_PEEKUSR: 2245 ret = compat_ptrace_read_user(child, addr, datap); 2246 break; 2247 2248 case PTRACE_POKEUSR: 2249 ret = compat_ptrace_write_user(child, addr, data); 2250 break; 2251 2252 case COMPAT_PTRACE_GETREGS: 2253 ret = copy_regset_to_user(child, 2254 &user_aarch32_view, 2255 REGSET_COMPAT_GPR, 2256 0, sizeof(compat_elf_gregset_t), 2257 datap); 2258 break; 2259 2260 case COMPAT_PTRACE_SETREGS: 2261 ret = copy_regset_from_user(child, 2262 &user_aarch32_view, 2263 REGSET_COMPAT_GPR, 2264 0, sizeof(compat_elf_gregset_t), 2265 datap); 2266 break; 2267 2268 case COMPAT_PTRACE_GET_THREAD_AREA: 2269 ret = put_user((compat_ulong_t)child->thread.uw.tp_value, 2270 (compat_ulong_t __user *)datap); 2271 break; 2272 2273 case COMPAT_PTRACE_SET_SYSCALL: 2274 task_pt_regs(child)->syscallno = data; 2275 ret = 0; 2276 break; 2277 2278 case COMPAT_PTRACE_GETVFPREGS: 2279 ret = copy_regset_to_user(child, 2280 &user_aarch32_view, 2281 REGSET_COMPAT_VFP, 2282 0, VFP_STATE_SIZE, 2283 datap); 2284 break; 2285 2286 case COMPAT_PTRACE_SETVFPREGS: 2287 ret = copy_regset_from_user(child, 2288 &user_aarch32_view, 2289 REGSET_COMPAT_VFP, 2290 0, VFP_STATE_SIZE, 2291 datap); 2292 break; 2293 2294 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2295 case COMPAT_PTRACE_GETHBPREGS: 2296 ret = compat_ptrace_gethbpregs(child, addr, datap); 2297 break; 2298 2299 case COMPAT_PTRACE_SETHBPREGS: 2300 ret = compat_ptrace_sethbpregs(child, addr, datap); 2301 break; 2302 #endif 2303 2304 default: 2305 ret = compat_ptrace_request(child, request, addr, 2306 data); 2307 break; 2308 } 2309 2310 return ret; 2311 } 2312 #endif /* CONFIG_COMPAT */ 2313 2314 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 2315 { 2316 /* 2317 * Core dumping of 32-bit tasks or compat ptrace requests must use the 2318 * user_aarch32_view compatible with arm32. Native ptrace requests on 2319 * 32-bit children use an extended user_aarch32_ptrace_view to allow 2320 * access to the TLS register. 2321 */ 2322 if (is_compat_task()) 2323 return &user_aarch32_view; 2324 else if (is_compat_thread(task_thread_info(task))) 2325 return &user_aarch32_ptrace_view; 2326 2327 return &user_aarch64_view; 2328 } 2329 2330 long arch_ptrace(struct task_struct *child, long request, 2331 unsigned long addr, unsigned long data) 2332 { 2333 switch (request) { 2334 case PTRACE_PEEKMTETAGS: 2335 case PTRACE_POKEMTETAGS: 2336 return mte_ptrace_copy_tags(child, request, addr, data); 2337 } 2338 2339 return ptrace_request(child, request, addr, data); 2340 } 2341 2342 enum ptrace_syscall_dir { 2343 PTRACE_SYSCALL_ENTER = 0, 2344 PTRACE_SYSCALL_EXIT, 2345 }; 2346 2347 static __always_inline unsigned long ptrace_save_reg(struct pt_regs *regs, 2348 enum ptrace_syscall_dir dir, 2349 int *regno) 2350 { 2351 unsigned long saved_reg; 2352 2353 /* 2354 * We have some ABI weirdness here in the way that we handle syscall 2355 * exit stops because we indicate whether or not the stop has been 2356 * signalled from syscall entry or syscall exit by clobbering a general 2357 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee 2358 * and restoring its old value after the stop. This means that: 2359 * 2360 * - Any writes by the tracer to this register during the stop are 2361 * ignored/discarded. 2362 * 2363 * - The actual value of the register is not available during the stop, 2364 * so the tracer cannot save it and restore it later. 2365 * 2366 * - Syscall stops behave differently to seccomp and pseudo-step traps 2367 * (the latter do not nobble any registers). 2368 */ 2369 *regno = (is_compat_task() ? 12 : 7); 2370 saved_reg = regs->regs[*regno]; 2371 regs->regs[*regno] = dir; 2372 2373 return saved_reg; 2374 } 2375 2376 static int report_syscall_entry(struct pt_regs *regs) 2377 { 2378 unsigned long saved_reg; 2379 int regno, ret; 2380 2381 saved_reg = ptrace_save_reg(regs, PTRACE_SYSCALL_ENTER, ®no); 2382 ret = ptrace_report_syscall_entry(regs); 2383 if (ret) 2384 forget_syscall(regs); 2385 regs->regs[regno] = saved_reg; 2386 2387 return ret; 2388 } 2389 2390 static void report_syscall_exit(struct pt_regs *regs) 2391 { 2392 unsigned long saved_reg; 2393 int regno; 2394 2395 saved_reg = ptrace_save_reg(regs, PTRACE_SYSCALL_EXIT, ®no); 2396 if (!test_thread_flag(TIF_SINGLESTEP)) { 2397 ptrace_report_syscall_exit(regs, 0); 2398 regs->regs[regno] = saved_reg; 2399 } else { 2400 regs->regs[regno] = saved_reg; 2401 2402 /* 2403 * Signal a pseudo-step exception since we are stepping but 2404 * tracer modifications to the registers may have rewound the 2405 * state machine. 2406 */ 2407 ptrace_report_syscall_exit(regs, 1); 2408 } 2409 } 2410 2411 int syscall_trace_enter(struct pt_regs *regs) 2412 { 2413 unsigned long flags = read_thread_flags(); 2414 int ret; 2415 2416 if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { 2417 ret = report_syscall_entry(regs); 2418 if (ret || (flags & _TIF_SYSCALL_EMU)) 2419 return NO_SYSCALL; 2420 } 2421 2422 /* Do the secure computing after ptrace; failures should be fast. */ 2423 if (secure_computing() == -1) 2424 return NO_SYSCALL; 2425 2426 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 2427 trace_sys_enter(regs, regs->syscallno); 2428 2429 audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1], 2430 regs->regs[2], regs->regs[3]); 2431 2432 return regs->syscallno; 2433 } 2434 2435 void syscall_trace_exit(struct pt_regs *regs) 2436 { 2437 unsigned long flags = read_thread_flags(); 2438 2439 audit_syscall_exit(regs); 2440 2441 if (flags & _TIF_SYSCALL_TRACEPOINT) 2442 trace_sys_exit(regs, syscall_get_return_value(current, regs)); 2443 2444 if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) 2445 report_syscall_exit(regs); 2446 2447 rseq_syscall(regs); 2448 } 2449 2450 /* 2451 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. 2452 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is 2453 * not described in ARM DDI 0487D.a. 2454 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may 2455 * be allocated an EL0 meaning in future. 2456 * Userspace cannot use these until they have an architectural meaning. 2457 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 2458 * We also reserve IL for the kernel; SS is handled dynamically. 2459 */ 2460 #define SPSR_EL1_AARCH64_RES0_BITS \ 2461 (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ 2462 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) 2463 #define SPSR_EL1_AARCH32_RES0_BITS \ 2464 (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) 2465 2466 static int valid_compat_regs(struct user_pt_regs *regs) 2467 { 2468 regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; 2469 2470 if (!system_supports_mixed_endian_el0()) { 2471 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 2472 regs->pstate |= PSR_AA32_E_BIT; 2473 else 2474 regs->pstate &= ~PSR_AA32_E_BIT; 2475 } 2476 2477 if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && 2478 (regs->pstate & PSR_AA32_A_BIT) == 0 && 2479 (regs->pstate & PSR_AA32_I_BIT) == 0 && 2480 (regs->pstate & PSR_AA32_F_BIT) == 0) { 2481 return 1; 2482 } 2483 2484 /* 2485 * Force PSR to a valid 32-bit EL0t, preserving the same bits as 2486 * arch/arm. 2487 */ 2488 regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | 2489 PSR_AA32_C_BIT | PSR_AA32_V_BIT | 2490 PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | 2491 PSR_AA32_GE_MASK | PSR_AA32_E_BIT | 2492 PSR_AA32_T_BIT; 2493 regs->pstate |= PSR_MODE32_BIT; 2494 2495 return 0; 2496 } 2497 2498 static int valid_native_regs(struct user_pt_regs *regs) 2499 { 2500 regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; 2501 2502 if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && 2503 (regs->pstate & PSR_D_BIT) == 0 && 2504 (regs->pstate & PSR_A_BIT) == 0 && 2505 (regs->pstate & PSR_I_BIT) == 0 && 2506 (regs->pstate & PSR_F_BIT) == 0) { 2507 return 1; 2508 } 2509 2510 /* Force PSR to a valid 64-bit EL0t */ 2511 regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; 2512 2513 return 0; 2514 } 2515 2516 /* 2517 * Are the current registers suitable for user mode? (used to maintain 2518 * security in signal handlers) 2519 */ 2520 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) 2521 { 2522 /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ 2523 user_regs_reset_single_step(regs, task); 2524 2525 if (is_compat_thread(task_thread_info(task))) 2526 return valid_compat_regs(regs); 2527 else 2528 return valid_native_regs(regs); 2529 } 2530