1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Based on arch/arm/kernel/ptrace.c 4 * 5 * By Ross Biro 1/23/92 6 * edited by Linus Torvalds 7 * ARM modifications Copyright (C) 2000 Russell King 8 * Copyright (C) 2012 ARM Ltd. 9 */ 10 11 #include <linux/audit.h> 12 #include <linux/compat.h> 13 #include <linux/kernel.h> 14 #include <linux/sched/signal.h> 15 #include <linux/sched/task_stack.h> 16 #include <linux/mm.h> 17 #include <linux/nospec.h> 18 #include <linux/smp.h> 19 #include <linux/ptrace.h> 20 #include <linux/user.h> 21 #include <linux/seccomp.h> 22 #include <linux/security.h> 23 #include <linux/init.h> 24 #include <linux/signal.h> 25 #include <linux/string.h> 26 #include <linux/uaccess.h> 27 #include <linux/perf_event.h> 28 #include <linux/hw_breakpoint.h> 29 #include <linux/regset.h> 30 #include <linux/elf.h> 31 #include <linux/rseq.h> 32 33 #include <asm/compat.h> 34 #include <asm/cpufeature.h> 35 #include <asm/debug-monitors.h> 36 #include <asm/fpsimd.h> 37 #include <asm/gcs.h> 38 #include <asm/mte.h> 39 #include <asm/pointer_auth.h> 40 #include <asm/stacktrace.h> 41 #include <asm/syscall.h> 42 #include <asm/traps.h> 43 #include <asm/system_misc.h> 44 45 #define CREATE_TRACE_POINTS 46 #include <trace/events/syscalls.h> 47 48 struct pt_regs_offset { 49 const char *name; 50 int offset; 51 }; 52 53 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 54 #define REG_OFFSET_END {.name = NULL, .offset = 0} 55 #define GPR_OFFSET_NAME(r) \ 56 {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} 57 58 static const struct pt_regs_offset regoffset_table[] = { 59 GPR_OFFSET_NAME(0), 60 GPR_OFFSET_NAME(1), 61 GPR_OFFSET_NAME(2), 62 GPR_OFFSET_NAME(3), 63 GPR_OFFSET_NAME(4), 64 GPR_OFFSET_NAME(5), 65 GPR_OFFSET_NAME(6), 66 GPR_OFFSET_NAME(7), 67 GPR_OFFSET_NAME(8), 68 GPR_OFFSET_NAME(9), 69 GPR_OFFSET_NAME(10), 70 GPR_OFFSET_NAME(11), 71 GPR_OFFSET_NAME(12), 72 GPR_OFFSET_NAME(13), 73 GPR_OFFSET_NAME(14), 74 GPR_OFFSET_NAME(15), 75 GPR_OFFSET_NAME(16), 76 GPR_OFFSET_NAME(17), 77 GPR_OFFSET_NAME(18), 78 GPR_OFFSET_NAME(19), 79 GPR_OFFSET_NAME(20), 80 GPR_OFFSET_NAME(21), 81 GPR_OFFSET_NAME(22), 82 GPR_OFFSET_NAME(23), 83 GPR_OFFSET_NAME(24), 84 GPR_OFFSET_NAME(25), 85 GPR_OFFSET_NAME(26), 86 GPR_OFFSET_NAME(27), 87 GPR_OFFSET_NAME(28), 88 GPR_OFFSET_NAME(29), 89 GPR_OFFSET_NAME(30), 90 {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, 91 REG_OFFSET_NAME(sp), 92 REG_OFFSET_NAME(pc), 93 REG_OFFSET_NAME(pstate), 94 REG_OFFSET_END, 95 }; 96 97 /** 98 * regs_query_register_offset() - query register offset from its name 99 * @name: the name of a register 100 * 101 * regs_query_register_offset() returns the offset of a register in struct 102 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 103 */ 104 int regs_query_register_offset(const char *name) 105 { 106 const struct pt_regs_offset *roff; 107 108 for (roff = regoffset_table; roff->name != NULL; roff++) 109 if (!strcmp(roff->name, name)) 110 return roff->offset; 111 return -EINVAL; 112 } 113 114 /** 115 * regs_within_kernel_stack() - check the address in the stack 116 * @regs: pt_regs which contains kernel stack pointer. 117 * @addr: address which is checked. 118 * 119 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 120 * If @addr is within the kernel stack, it returns true. If not, returns false. 121 */ 122 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 123 { 124 return ((addr & ~(THREAD_SIZE - 1)) == 125 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || 126 on_irq_stack(addr, sizeof(unsigned long)); 127 } 128 129 /** 130 * regs_get_kernel_stack_nth() - get Nth entry of the stack 131 * @regs: pt_regs which contains kernel stack pointer. 132 * @n: stack entry number. 133 * 134 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 135 * is specified by @regs. If the @n th entry is NOT in the kernel stack, 136 * this returns 0. 137 */ 138 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 139 { 140 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); 141 142 addr += n; 143 if (regs_within_kernel_stack(regs, (unsigned long)addr)) 144 return *addr; 145 else 146 return 0; 147 } 148 149 /* 150 * TODO: does not yet catch signals sent when the child dies. 151 * in exit.c or in signal.c. 152 */ 153 154 /* 155 * Called by kernel/ptrace.c when detaching.. 156 */ 157 void ptrace_disable(struct task_struct *child) 158 { 159 /* 160 * This would be better off in core code, but PTRACE_DETACH has 161 * grown its fair share of arch-specific worts and changing it 162 * is likely to cause regressions on obscure architectures. 163 */ 164 user_disable_single_step(child); 165 } 166 167 #ifdef CONFIG_HAVE_HW_BREAKPOINT 168 /* 169 * Handle hitting a HW-breakpoint. 170 */ 171 static void ptrace_hbptriggered(struct perf_event *bp, 172 struct perf_sample_data *data, 173 struct pt_regs *regs) 174 { 175 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); 176 const char *desc = "Hardware breakpoint trap (ptrace)"; 177 178 if (is_compat_task()) { 179 int si_errno = 0; 180 int i; 181 182 for (i = 0; i < ARM_MAX_BRP; ++i) { 183 if (current->thread.debug.hbp_break[i] == bp) { 184 si_errno = (i << 1) + 1; 185 break; 186 } 187 } 188 189 for (i = 0; i < ARM_MAX_WRP; ++i) { 190 if (current->thread.debug.hbp_watch[i] == bp) { 191 si_errno = -((i << 1) + 1); 192 break; 193 } 194 } 195 arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, 196 desc); 197 return; 198 } 199 200 arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); 201 } 202 203 /* 204 * Unregister breakpoints from this task and reset the pointers in 205 * the thread_struct. 206 */ 207 void flush_ptrace_hw_breakpoint(struct task_struct *tsk) 208 { 209 int i; 210 struct thread_struct *t = &tsk->thread; 211 212 for (i = 0; i < ARM_MAX_BRP; i++) { 213 if (t->debug.hbp_break[i]) { 214 unregister_hw_breakpoint(t->debug.hbp_break[i]); 215 t->debug.hbp_break[i] = NULL; 216 } 217 } 218 219 for (i = 0; i < ARM_MAX_WRP; i++) { 220 if (t->debug.hbp_watch[i]) { 221 unregister_hw_breakpoint(t->debug.hbp_watch[i]); 222 t->debug.hbp_watch[i] = NULL; 223 } 224 } 225 } 226 227 void ptrace_hw_copy_thread(struct task_struct *tsk) 228 { 229 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); 230 } 231 232 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, 233 struct task_struct *tsk, 234 unsigned long idx) 235 { 236 struct perf_event *bp = ERR_PTR(-EINVAL); 237 238 switch (note_type) { 239 case NT_ARM_HW_BREAK: 240 if (idx >= ARM_MAX_BRP) 241 goto out; 242 idx = array_index_nospec(idx, ARM_MAX_BRP); 243 bp = tsk->thread.debug.hbp_break[idx]; 244 break; 245 case NT_ARM_HW_WATCH: 246 if (idx >= ARM_MAX_WRP) 247 goto out; 248 idx = array_index_nospec(idx, ARM_MAX_WRP); 249 bp = tsk->thread.debug.hbp_watch[idx]; 250 break; 251 } 252 253 out: 254 return bp; 255 } 256 257 static int ptrace_hbp_set_event(unsigned int note_type, 258 struct task_struct *tsk, 259 unsigned long idx, 260 struct perf_event *bp) 261 { 262 int err = -EINVAL; 263 264 switch (note_type) { 265 case NT_ARM_HW_BREAK: 266 if (idx >= ARM_MAX_BRP) 267 goto out; 268 idx = array_index_nospec(idx, ARM_MAX_BRP); 269 tsk->thread.debug.hbp_break[idx] = bp; 270 err = 0; 271 break; 272 case NT_ARM_HW_WATCH: 273 if (idx >= ARM_MAX_WRP) 274 goto out; 275 idx = array_index_nospec(idx, ARM_MAX_WRP); 276 tsk->thread.debug.hbp_watch[idx] = bp; 277 err = 0; 278 break; 279 } 280 281 out: 282 return err; 283 } 284 285 static struct perf_event *ptrace_hbp_create(unsigned int note_type, 286 struct task_struct *tsk, 287 unsigned long idx) 288 { 289 struct perf_event *bp; 290 struct perf_event_attr attr; 291 int err, type; 292 293 switch (note_type) { 294 case NT_ARM_HW_BREAK: 295 type = HW_BREAKPOINT_X; 296 break; 297 case NT_ARM_HW_WATCH: 298 type = HW_BREAKPOINT_RW; 299 break; 300 default: 301 return ERR_PTR(-EINVAL); 302 } 303 304 ptrace_breakpoint_init(&attr); 305 306 /* 307 * Initialise fields to sane defaults 308 * (i.e. values that will pass validation). 309 */ 310 attr.bp_addr = 0; 311 attr.bp_len = HW_BREAKPOINT_LEN_4; 312 attr.bp_type = type; 313 attr.disabled = 1; 314 315 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); 316 if (IS_ERR(bp)) 317 return bp; 318 319 err = ptrace_hbp_set_event(note_type, tsk, idx, bp); 320 if (err) 321 return ERR_PTR(err); 322 323 return bp; 324 } 325 326 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, 327 struct arch_hw_breakpoint_ctrl ctrl, 328 struct perf_event_attr *attr) 329 { 330 int err, len, type, offset, disabled = !ctrl.enabled; 331 332 attr->disabled = disabled; 333 if (disabled) 334 return 0; 335 336 err = arch_bp_generic_fields(ctrl, &len, &type, &offset); 337 if (err) 338 return err; 339 340 switch (note_type) { 341 case NT_ARM_HW_BREAK: 342 if ((type & HW_BREAKPOINT_X) != type) 343 return -EINVAL; 344 break; 345 case NT_ARM_HW_WATCH: 346 if ((type & HW_BREAKPOINT_RW) != type) 347 return -EINVAL; 348 break; 349 default: 350 return -EINVAL; 351 } 352 353 attr->bp_len = len; 354 attr->bp_type = type; 355 attr->bp_addr += offset; 356 357 return 0; 358 } 359 360 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) 361 { 362 u8 num; 363 u32 reg = 0; 364 365 switch (note_type) { 366 case NT_ARM_HW_BREAK: 367 num = hw_breakpoint_slots(TYPE_INST); 368 break; 369 case NT_ARM_HW_WATCH: 370 num = hw_breakpoint_slots(TYPE_DATA); 371 break; 372 default: 373 return -EINVAL; 374 } 375 376 reg |= debug_monitors_arch(); 377 reg <<= 8; 378 reg |= num; 379 380 *info = reg; 381 return 0; 382 } 383 384 static int ptrace_hbp_get_ctrl(unsigned int note_type, 385 struct task_struct *tsk, 386 unsigned long idx, 387 u32 *ctrl) 388 { 389 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 390 391 if (IS_ERR(bp)) 392 return PTR_ERR(bp); 393 394 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; 395 return 0; 396 } 397 398 static int ptrace_hbp_get_addr(unsigned int note_type, 399 struct task_struct *tsk, 400 unsigned long idx, 401 u64 *addr) 402 { 403 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 404 405 if (IS_ERR(bp)) 406 return PTR_ERR(bp); 407 408 *addr = bp ? counter_arch_bp(bp)->address : 0; 409 return 0; 410 } 411 412 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, 413 struct task_struct *tsk, 414 unsigned long idx) 415 { 416 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 417 418 if (!bp) 419 bp = ptrace_hbp_create(note_type, tsk, idx); 420 421 return bp; 422 } 423 424 static int ptrace_hbp_set_ctrl(unsigned int note_type, 425 struct task_struct *tsk, 426 unsigned long idx, 427 u32 uctrl) 428 { 429 int err; 430 struct perf_event *bp; 431 struct perf_event_attr attr; 432 struct arch_hw_breakpoint_ctrl ctrl; 433 434 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 435 if (IS_ERR(bp)) { 436 err = PTR_ERR(bp); 437 return err; 438 } 439 440 attr = bp->attr; 441 decode_ctrl_reg(uctrl, &ctrl); 442 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 443 if (err) 444 return err; 445 446 return modify_user_hw_breakpoint(bp, &attr); 447 } 448 449 static int ptrace_hbp_set_addr(unsigned int note_type, 450 struct task_struct *tsk, 451 unsigned long idx, 452 u64 addr) 453 { 454 int err; 455 struct perf_event *bp; 456 struct perf_event_attr attr; 457 458 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 459 if (IS_ERR(bp)) { 460 err = PTR_ERR(bp); 461 return err; 462 } 463 464 attr = bp->attr; 465 attr.bp_addr = addr; 466 err = modify_user_hw_breakpoint(bp, &attr); 467 return err; 468 } 469 470 #define PTRACE_HBP_ADDR_SZ sizeof(u64) 471 #define PTRACE_HBP_CTRL_SZ sizeof(u32) 472 #define PTRACE_HBP_PAD_SZ sizeof(u32) 473 474 static int hw_break_get(struct task_struct *target, 475 const struct user_regset *regset, 476 struct membuf to) 477 { 478 unsigned int note_type = regset->core_note_type; 479 int ret, idx = 0; 480 u32 info, ctrl; 481 u64 addr; 482 483 /* Resource info */ 484 ret = ptrace_hbp_get_resource_info(note_type, &info); 485 if (ret) 486 return ret; 487 488 membuf_write(&to, &info, sizeof(info)); 489 membuf_zero(&to, sizeof(u32)); 490 /* (address, ctrl) registers */ 491 while (to.left) { 492 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); 493 if (ret) 494 return ret; 495 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); 496 if (ret) 497 return ret; 498 membuf_store(&to, addr); 499 membuf_store(&to, ctrl); 500 membuf_zero(&to, sizeof(u32)); 501 idx++; 502 } 503 return 0; 504 } 505 506 static int hw_break_set(struct task_struct *target, 507 const struct user_regset *regset, 508 unsigned int pos, unsigned int count, 509 const void *kbuf, const void __user *ubuf) 510 { 511 unsigned int note_type = regset->core_note_type; 512 int ret, idx = 0, offset, limit; 513 u32 ctrl; 514 u64 addr; 515 516 /* Resource info and pad */ 517 offset = offsetof(struct user_hwdebug_state, dbg_regs); 518 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); 519 520 /* (address, ctrl) registers */ 521 limit = regset->n * regset->size; 522 while (count && offset < limit) { 523 if (count < PTRACE_HBP_ADDR_SZ) 524 return -EINVAL; 525 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 526 offset, offset + PTRACE_HBP_ADDR_SZ); 527 if (ret) 528 return ret; 529 ret = ptrace_hbp_set_addr(note_type, target, idx, addr); 530 if (ret) 531 return ret; 532 offset += PTRACE_HBP_ADDR_SZ; 533 534 if (!count) 535 break; 536 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 537 offset, offset + PTRACE_HBP_CTRL_SZ); 538 if (ret) 539 return ret; 540 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); 541 if (ret) 542 return ret; 543 offset += PTRACE_HBP_CTRL_SZ; 544 545 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 546 offset, offset + PTRACE_HBP_PAD_SZ); 547 offset += PTRACE_HBP_PAD_SZ; 548 idx++; 549 } 550 551 return 0; 552 } 553 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 554 555 static int gpr_get(struct task_struct *target, 556 const struct user_regset *regset, 557 struct membuf to) 558 { 559 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; 560 return membuf_write(&to, uregs, sizeof(*uregs)); 561 } 562 563 static int gpr_set(struct task_struct *target, const struct user_regset *regset, 564 unsigned int pos, unsigned int count, 565 const void *kbuf, const void __user *ubuf) 566 { 567 int ret; 568 struct user_pt_regs newregs = task_pt_regs(target)->user_regs; 569 570 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 571 if (ret) 572 return ret; 573 574 if (!valid_user_regs(&newregs, target)) 575 return -EINVAL; 576 577 task_pt_regs(target)->user_regs = newregs; 578 return 0; 579 } 580 581 static int fpr_active(struct task_struct *target, const struct user_regset *regset) 582 { 583 if (!system_supports_fpsimd()) 584 return -ENODEV; 585 return regset->n; 586 } 587 588 /* 589 * TODO: update fp accessors for lazy context switching (sync/flush hwstate) 590 */ 591 static int __fpr_get(struct task_struct *target, 592 const struct user_regset *regset, 593 struct membuf to) 594 { 595 struct user_fpsimd_state *uregs; 596 597 sve_sync_to_fpsimd(target); 598 599 uregs = &target->thread.uw.fpsimd_state; 600 601 return membuf_write(&to, uregs, sizeof(*uregs)); 602 } 603 604 static int fpr_get(struct task_struct *target, const struct user_regset *regset, 605 struct membuf to) 606 { 607 if (!system_supports_fpsimd()) 608 return -EINVAL; 609 610 if (target == current) 611 fpsimd_preserve_current_state(); 612 613 return __fpr_get(target, regset, to); 614 } 615 616 static int __fpr_set(struct task_struct *target, 617 const struct user_regset *regset, 618 unsigned int pos, unsigned int count, 619 const void *kbuf, const void __user *ubuf, 620 unsigned int start_pos) 621 { 622 int ret; 623 struct user_fpsimd_state newstate; 624 625 /* 626 * Ensure target->thread.uw.fpsimd_state is up to date, so that a 627 * short copyin can't resurrect stale data. 628 */ 629 sve_sync_to_fpsimd(target); 630 631 newstate = target->thread.uw.fpsimd_state; 632 633 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 634 start_pos, start_pos + sizeof(newstate)); 635 if (ret) 636 return ret; 637 638 target->thread.uw.fpsimd_state = newstate; 639 640 return ret; 641 } 642 643 static int fpr_set(struct task_struct *target, const struct user_regset *regset, 644 unsigned int pos, unsigned int count, 645 const void *kbuf, const void __user *ubuf) 646 { 647 int ret; 648 649 if (!system_supports_fpsimd()) 650 return -EINVAL; 651 652 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); 653 if (ret) 654 return ret; 655 656 sve_sync_from_fpsimd_zeropad(target); 657 fpsimd_flush_task_state(target); 658 659 return ret; 660 } 661 662 static int tls_get(struct task_struct *target, const struct user_regset *regset, 663 struct membuf to) 664 { 665 int ret; 666 667 if (target == current) 668 tls_preserve_current_state(); 669 670 ret = membuf_store(&to, target->thread.uw.tp_value); 671 if (system_supports_tpidr2()) 672 ret = membuf_store(&to, target->thread.tpidr2_el0); 673 else 674 ret = membuf_zero(&to, sizeof(u64)); 675 676 return ret; 677 } 678 679 static int tls_set(struct task_struct *target, const struct user_regset *regset, 680 unsigned int pos, unsigned int count, 681 const void *kbuf, const void __user *ubuf) 682 { 683 int ret; 684 unsigned long tls[2]; 685 686 tls[0] = target->thread.uw.tp_value; 687 if (system_supports_tpidr2()) 688 tls[1] = target->thread.tpidr2_el0; 689 690 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); 691 if (ret) 692 return ret; 693 694 target->thread.uw.tp_value = tls[0]; 695 if (system_supports_tpidr2()) 696 target->thread.tpidr2_el0 = tls[1]; 697 698 return ret; 699 } 700 701 static int fpmr_get(struct task_struct *target, const struct user_regset *regset, 702 struct membuf to) 703 { 704 if (!system_supports_fpmr()) 705 return -EINVAL; 706 707 if (target == current) 708 fpsimd_preserve_current_state(); 709 710 return membuf_store(&to, target->thread.uw.fpmr); 711 } 712 713 static int fpmr_set(struct task_struct *target, const struct user_regset *regset, 714 unsigned int pos, unsigned int count, 715 const void *kbuf, const void __user *ubuf) 716 { 717 int ret; 718 unsigned long fpmr; 719 720 if (!system_supports_fpmr()) 721 return -EINVAL; 722 723 fpmr = target->thread.uw.fpmr; 724 725 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count); 726 if (ret) 727 return ret; 728 729 target->thread.uw.fpmr = fpmr; 730 731 fpsimd_flush_task_state(target); 732 733 return 0; 734 } 735 736 static int system_call_get(struct task_struct *target, 737 const struct user_regset *regset, 738 struct membuf to) 739 { 740 return membuf_store(&to, task_pt_regs(target)->syscallno); 741 } 742 743 static int system_call_set(struct task_struct *target, 744 const struct user_regset *regset, 745 unsigned int pos, unsigned int count, 746 const void *kbuf, const void __user *ubuf) 747 { 748 int syscallno = task_pt_regs(target)->syscallno; 749 int ret; 750 751 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 752 if (ret) 753 return ret; 754 755 task_pt_regs(target)->syscallno = syscallno; 756 return ret; 757 } 758 759 #ifdef CONFIG_ARM64_SVE 760 761 static void sve_init_header_from_task(struct user_sve_header *header, 762 struct task_struct *target, 763 enum vec_type type) 764 { 765 unsigned int vq; 766 bool active; 767 enum vec_type task_type; 768 769 memset(header, 0, sizeof(*header)); 770 771 /* Check if the requested registers are active for the task */ 772 if (thread_sm_enabled(&target->thread)) 773 task_type = ARM64_VEC_SME; 774 else 775 task_type = ARM64_VEC_SVE; 776 active = (task_type == type); 777 778 switch (type) { 779 case ARM64_VEC_SVE: 780 if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) 781 header->flags |= SVE_PT_VL_INHERIT; 782 break; 783 case ARM64_VEC_SME: 784 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 785 header->flags |= SVE_PT_VL_INHERIT; 786 break; 787 default: 788 WARN_ON_ONCE(1); 789 return; 790 } 791 792 if (active) { 793 if (target->thread.fp_type == FP_STATE_FPSIMD) { 794 header->flags |= SVE_PT_REGS_FPSIMD; 795 } else { 796 header->flags |= SVE_PT_REGS_SVE; 797 } 798 } 799 800 header->vl = task_get_vl(target, type); 801 vq = sve_vq_from_vl(header->vl); 802 803 header->max_vl = vec_max_vl(type); 804 header->size = SVE_PT_SIZE(vq, header->flags); 805 header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), 806 SVE_PT_REGS_SVE); 807 } 808 809 static unsigned int sve_size_from_header(struct user_sve_header const *header) 810 { 811 return ALIGN(header->size, SVE_VQ_BYTES); 812 } 813 814 static int sve_get_common(struct task_struct *target, 815 const struct user_regset *regset, 816 struct membuf to, 817 enum vec_type type) 818 { 819 struct user_sve_header header; 820 unsigned int vq; 821 unsigned long start, end; 822 823 /* Header */ 824 sve_init_header_from_task(&header, target, type); 825 vq = sve_vq_from_vl(header.vl); 826 827 membuf_write(&to, &header, sizeof(header)); 828 829 if (target == current) 830 fpsimd_preserve_current_state(); 831 832 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 833 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 834 835 switch ((header.flags & SVE_PT_REGS_MASK)) { 836 case SVE_PT_REGS_FPSIMD: 837 return __fpr_get(target, regset, to); 838 839 case SVE_PT_REGS_SVE: 840 start = SVE_PT_SVE_OFFSET; 841 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 842 membuf_write(&to, target->thread.sve_state, end - start); 843 844 start = end; 845 end = SVE_PT_SVE_FPSR_OFFSET(vq); 846 membuf_zero(&to, end - start); 847 848 /* 849 * Copy fpsr, and fpcr which must follow contiguously in 850 * struct fpsimd_state: 851 */ 852 start = end; 853 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 854 membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, 855 end - start); 856 857 start = end; 858 end = sve_size_from_header(&header); 859 return membuf_zero(&to, end - start); 860 861 default: 862 return 0; 863 } 864 } 865 866 static int sve_get(struct task_struct *target, 867 const struct user_regset *regset, 868 struct membuf to) 869 { 870 if (!system_supports_sve()) 871 return -EINVAL; 872 873 return sve_get_common(target, regset, to, ARM64_VEC_SVE); 874 } 875 876 static int sve_set_common(struct task_struct *target, 877 const struct user_regset *regset, 878 unsigned int pos, unsigned int count, 879 const void *kbuf, const void __user *ubuf, 880 enum vec_type type) 881 { 882 int ret; 883 struct user_sve_header header; 884 unsigned int vq; 885 unsigned long start, end; 886 887 /* Header */ 888 if (count < sizeof(header)) 889 return -EINVAL; 890 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 891 0, sizeof(header)); 892 if (ret) 893 goto out; 894 895 /* 896 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by 897 * vec_set_vector_length(), which will also validate them for us: 898 */ 899 ret = vec_set_vector_length(target, type, header.vl, 900 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); 901 if (ret) 902 goto out; 903 904 /* 905 * Actual VL set may be different from what the user asked 906 * for, or we may have configured the _ONEXEC VL not the 907 * current VL: 908 */ 909 vq = sve_vq_from_vl(task_get_vl(target, type)); 910 911 /* Enter/exit streaming mode */ 912 if (system_supports_sme()) { 913 u64 old_svcr = target->thread.svcr; 914 915 switch (type) { 916 case ARM64_VEC_SVE: 917 target->thread.svcr &= ~SVCR_SM_MASK; 918 break; 919 case ARM64_VEC_SME: 920 target->thread.svcr |= SVCR_SM_MASK; 921 922 /* 923 * Disable traps and ensure there is SME storage but 924 * preserve any currently set values in ZA/ZT. 925 */ 926 sme_alloc(target, false); 927 set_tsk_thread_flag(target, TIF_SME); 928 break; 929 default: 930 WARN_ON_ONCE(1); 931 ret = -EINVAL; 932 goto out; 933 } 934 935 /* 936 * If we switched then invalidate any existing SVE 937 * state and ensure there's storage. 938 */ 939 if (target->thread.svcr != old_svcr) 940 sve_alloc(target, true); 941 } 942 943 /* Registers: FPSIMD-only case */ 944 945 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 946 if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { 947 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 948 SVE_PT_FPSIMD_OFFSET); 949 clear_tsk_thread_flag(target, TIF_SVE); 950 target->thread.fp_type = FP_STATE_FPSIMD; 951 goto out; 952 } 953 954 /* 955 * Otherwise: no registers or full SVE case. For backwards 956 * compatibility reasons we treat empty flags as SVE registers. 957 */ 958 959 /* 960 * If setting a different VL from the requested VL and there is 961 * register data, the data layout will be wrong: don't even 962 * try to set the registers in this case. 963 */ 964 if (count && vq != sve_vq_from_vl(header.vl)) { 965 ret = -EIO; 966 goto out; 967 } 968 969 sve_alloc(target, true); 970 if (!target->thread.sve_state) { 971 ret = -ENOMEM; 972 clear_tsk_thread_flag(target, TIF_SVE); 973 target->thread.fp_type = FP_STATE_FPSIMD; 974 goto out; 975 } 976 977 /* 978 * Ensure target->thread.sve_state is up to date with target's 979 * FPSIMD regs, so that a short copyin leaves trailing 980 * registers unmodified. Only enable SVE if we are 981 * configuring normal SVE, a system with streaming SVE may not 982 * have normal SVE. 983 */ 984 fpsimd_sync_to_sve(target); 985 if (type == ARM64_VEC_SVE) 986 set_tsk_thread_flag(target, TIF_SVE); 987 target->thread.fp_type = FP_STATE_SVE; 988 989 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 990 start = SVE_PT_SVE_OFFSET; 991 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 992 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 993 target->thread.sve_state, 994 start, end); 995 if (ret) 996 goto out; 997 998 start = end; 999 end = SVE_PT_SVE_FPSR_OFFSET(vq); 1000 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end); 1001 1002 /* 1003 * Copy fpsr, and fpcr which must follow contiguously in 1004 * struct fpsimd_state: 1005 */ 1006 start = end; 1007 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 1008 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1009 &target->thread.uw.fpsimd_state.fpsr, 1010 start, end); 1011 1012 out: 1013 fpsimd_flush_task_state(target); 1014 return ret; 1015 } 1016 1017 static int sve_set(struct task_struct *target, 1018 const struct user_regset *regset, 1019 unsigned int pos, unsigned int count, 1020 const void *kbuf, const void __user *ubuf) 1021 { 1022 if (!system_supports_sve()) 1023 return -EINVAL; 1024 1025 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1026 ARM64_VEC_SVE); 1027 } 1028 1029 #endif /* CONFIG_ARM64_SVE */ 1030 1031 #ifdef CONFIG_ARM64_SME 1032 1033 static int ssve_get(struct task_struct *target, 1034 const struct user_regset *regset, 1035 struct membuf to) 1036 { 1037 if (!system_supports_sme()) 1038 return -EINVAL; 1039 1040 return sve_get_common(target, regset, to, ARM64_VEC_SME); 1041 } 1042 1043 static int ssve_set(struct task_struct *target, 1044 const struct user_regset *regset, 1045 unsigned int pos, unsigned int count, 1046 const void *kbuf, const void __user *ubuf) 1047 { 1048 if (!system_supports_sme()) 1049 return -EINVAL; 1050 1051 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1052 ARM64_VEC_SME); 1053 } 1054 1055 static int za_get(struct task_struct *target, 1056 const struct user_regset *regset, 1057 struct membuf to) 1058 { 1059 struct user_za_header header; 1060 unsigned int vq; 1061 unsigned long start, end; 1062 1063 if (!system_supports_sme()) 1064 return -EINVAL; 1065 1066 /* Header */ 1067 memset(&header, 0, sizeof(header)); 1068 1069 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 1070 header.flags |= ZA_PT_VL_INHERIT; 1071 1072 header.vl = task_get_sme_vl(target); 1073 vq = sve_vq_from_vl(header.vl); 1074 header.max_vl = sme_max_vl(); 1075 header.max_size = ZA_PT_SIZE(vq); 1076 1077 /* If ZA is not active there is only the header */ 1078 if (thread_za_enabled(&target->thread)) 1079 header.size = ZA_PT_SIZE(vq); 1080 else 1081 header.size = ZA_PT_ZA_OFFSET; 1082 1083 membuf_write(&to, &header, sizeof(header)); 1084 1085 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1086 end = ZA_PT_ZA_OFFSET; 1087 1088 if (target == current) 1089 fpsimd_preserve_current_state(); 1090 1091 /* Any register data to include? */ 1092 if (thread_za_enabled(&target->thread)) { 1093 start = end; 1094 end = ZA_PT_SIZE(vq); 1095 membuf_write(&to, target->thread.sme_state, end - start); 1096 } 1097 1098 /* Zero any trailing padding */ 1099 start = end; 1100 end = ALIGN(header.size, SVE_VQ_BYTES); 1101 return membuf_zero(&to, end - start); 1102 } 1103 1104 static int za_set(struct task_struct *target, 1105 const struct user_regset *regset, 1106 unsigned int pos, unsigned int count, 1107 const void *kbuf, const void __user *ubuf) 1108 { 1109 int ret; 1110 struct user_za_header header; 1111 unsigned int vq; 1112 unsigned long start, end; 1113 1114 if (!system_supports_sme()) 1115 return -EINVAL; 1116 1117 /* Header */ 1118 if (count < sizeof(header)) 1119 return -EINVAL; 1120 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 1121 0, sizeof(header)); 1122 if (ret) 1123 goto out; 1124 1125 /* 1126 * All current ZA_PT_* flags are consumed by 1127 * vec_set_vector_length(), which will also validate them for 1128 * us: 1129 */ 1130 ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, 1131 ((unsigned long)header.flags) << 16); 1132 if (ret) 1133 goto out; 1134 1135 /* 1136 * Actual VL set may be different from what the user asked 1137 * for, or we may have configured the _ONEXEC rather than 1138 * current VL: 1139 */ 1140 vq = sve_vq_from_vl(task_get_sme_vl(target)); 1141 1142 /* Ensure there is some SVE storage for streaming mode */ 1143 if (!target->thread.sve_state) { 1144 sve_alloc(target, false); 1145 if (!target->thread.sve_state) { 1146 ret = -ENOMEM; 1147 goto out; 1148 } 1149 } 1150 1151 /* 1152 * Only flush the storage if PSTATE.ZA was not already set, 1153 * otherwise preserve any existing data. 1154 */ 1155 sme_alloc(target, !thread_za_enabled(&target->thread)); 1156 if (!target->thread.sme_state) 1157 return -ENOMEM; 1158 1159 /* If there is no data then disable ZA */ 1160 if (!count) { 1161 target->thread.svcr &= ~SVCR_ZA_MASK; 1162 goto out; 1163 } 1164 1165 /* 1166 * If setting a different VL from the requested VL and there is 1167 * register data, the data layout will be wrong: don't even 1168 * try to set the registers in this case. 1169 */ 1170 if (vq != sve_vq_from_vl(header.vl)) { 1171 ret = -EIO; 1172 goto out; 1173 } 1174 1175 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1176 start = ZA_PT_ZA_OFFSET; 1177 end = ZA_PT_SIZE(vq); 1178 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1179 target->thread.sme_state, 1180 start, end); 1181 if (ret) 1182 goto out; 1183 1184 /* Mark ZA as active and let userspace use it */ 1185 set_tsk_thread_flag(target, TIF_SME); 1186 target->thread.svcr |= SVCR_ZA_MASK; 1187 1188 out: 1189 fpsimd_flush_task_state(target); 1190 return ret; 1191 } 1192 1193 static int zt_get(struct task_struct *target, 1194 const struct user_regset *regset, 1195 struct membuf to) 1196 { 1197 if (!system_supports_sme2()) 1198 return -EINVAL; 1199 1200 /* 1201 * If PSTATE.ZA is not set then ZT will be zeroed when it is 1202 * enabled so report the current register value as zero. 1203 */ 1204 if (thread_za_enabled(&target->thread)) 1205 membuf_write(&to, thread_zt_state(&target->thread), 1206 ZT_SIG_REG_BYTES); 1207 else 1208 membuf_zero(&to, ZT_SIG_REG_BYTES); 1209 1210 return 0; 1211 } 1212 1213 static int zt_set(struct task_struct *target, 1214 const struct user_regset *regset, 1215 unsigned int pos, unsigned int count, 1216 const void *kbuf, const void __user *ubuf) 1217 { 1218 int ret; 1219 1220 if (!system_supports_sme2()) 1221 return -EINVAL; 1222 1223 /* Ensure SVE storage in case this is first use of SME */ 1224 sve_alloc(target, false); 1225 if (!target->thread.sve_state) 1226 return -ENOMEM; 1227 1228 if (!thread_za_enabled(&target->thread)) { 1229 sme_alloc(target, true); 1230 if (!target->thread.sme_state) 1231 return -ENOMEM; 1232 } 1233 1234 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1235 thread_zt_state(&target->thread), 1236 0, ZT_SIG_REG_BYTES); 1237 if (ret == 0) { 1238 target->thread.svcr |= SVCR_ZA_MASK; 1239 set_tsk_thread_flag(target, TIF_SME); 1240 } 1241 1242 fpsimd_flush_task_state(target); 1243 1244 return ret; 1245 } 1246 1247 #endif /* CONFIG_ARM64_SME */ 1248 1249 #ifdef CONFIG_ARM64_PTR_AUTH 1250 static int pac_mask_get(struct task_struct *target, 1251 const struct user_regset *regset, 1252 struct membuf to) 1253 { 1254 /* 1255 * The PAC bits can differ across data and instruction pointers 1256 * depending on TCR_EL1.TBID*, which we may make use of in future, so 1257 * we expose separate masks. 1258 */ 1259 unsigned long mask = ptrauth_user_pac_mask(); 1260 struct user_pac_mask uregs = { 1261 .data_mask = mask, 1262 .insn_mask = mask, 1263 }; 1264 1265 if (!system_supports_address_auth()) 1266 return -EINVAL; 1267 1268 return membuf_write(&to, &uregs, sizeof(uregs)); 1269 } 1270 1271 static int pac_enabled_keys_get(struct task_struct *target, 1272 const struct user_regset *regset, 1273 struct membuf to) 1274 { 1275 long enabled_keys = ptrauth_get_enabled_keys(target); 1276 1277 if (IS_ERR_VALUE(enabled_keys)) 1278 return enabled_keys; 1279 1280 return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); 1281 } 1282 1283 static int pac_enabled_keys_set(struct task_struct *target, 1284 const struct user_regset *regset, 1285 unsigned int pos, unsigned int count, 1286 const void *kbuf, const void __user *ubuf) 1287 { 1288 int ret; 1289 long enabled_keys = ptrauth_get_enabled_keys(target); 1290 1291 if (IS_ERR_VALUE(enabled_keys)) 1292 return enabled_keys; 1293 1294 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, 1295 sizeof(long)); 1296 if (ret) 1297 return ret; 1298 1299 return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, 1300 enabled_keys); 1301 } 1302 1303 #ifdef CONFIG_CHECKPOINT_RESTORE 1304 static __uint128_t pac_key_to_user(const struct ptrauth_key *key) 1305 { 1306 return (__uint128_t)key->hi << 64 | key->lo; 1307 } 1308 1309 static struct ptrauth_key pac_key_from_user(__uint128_t ukey) 1310 { 1311 struct ptrauth_key key = { 1312 .lo = (unsigned long)ukey, 1313 .hi = (unsigned long)(ukey >> 64), 1314 }; 1315 1316 return key; 1317 } 1318 1319 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, 1320 const struct ptrauth_keys_user *keys) 1321 { 1322 ukeys->apiakey = pac_key_to_user(&keys->apia); 1323 ukeys->apibkey = pac_key_to_user(&keys->apib); 1324 ukeys->apdakey = pac_key_to_user(&keys->apda); 1325 ukeys->apdbkey = pac_key_to_user(&keys->apdb); 1326 } 1327 1328 static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, 1329 const struct user_pac_address_keys *ukeys) 1330 { 1331 keys->apia = pac_key_from_user(ukeys->apiakey); 1332 keys->apib = pac_key_from_user(ukeys->apibkey); 1333 keys->apda = pac_key_from_user(ukeys->apdakey); 1334 keys->apdb = pac_key_from_user(ukeys->apdbkey); 1335 } 1336 1337 static int pac_address_keys_get(struct task_struct *target, 1338 const struct user_regset *regset, 1339 struct membuf to) 1340 { 1341 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1342 struct user_pac_address_keys user_keys; 1343 1344 if (!system_supports_address_auth()) 1345 return -EINVAL; 1346 1347 pac_address_keys_to_user(&user_keys, keys); 1348 1349 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1350 } 1351 1352 static int pac_address_keys_set(struct task_struct *target, 1353 const struct user_regset *regset, 1354 unsigned int pos, unsigned int count, 1355 const void *kbuf, const void __user *ubuf) 1356 { 1357 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1358 struct user_pac_address_keys user_keys; 1359 int ret; 1360 1361 if (!system_supports_address_auth()) 1362 return -EINVAL; 1363 1364 pac_address_keys_to_user(&user_keys, keys); 1365 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1366 &user_keys, 0, -1); 1367 if (ret) 1368 return ret; 1369 pac_address_keys_from_user(keys, &user_keys); 1370 1371 return 0; 1372 } 1373 1374 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, 1375 const struct ptrauth_keys_user *keys) 1376 { 1377 ukeys->apgakey = pac_key_to_user(&keys->apga); 1378 } 1379 1380 static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, 1381 const struct user_pac_generic_keys *ukeys) 1382 { 1383 keys->apga = pac_key_from_user(ukeys->apgakey); 1384 } 1385 1386 static int pac_generic_keys_get(struct task_struct *target, 1387 const struct user_regset *regset, 1388 struct membuf to) 1389 { 1390 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1391 struct user_pac_generic_keys user_keys; 1392 1393 if (!system_supports_generic_auth()) 1394 return -EINVAL; 1395 1396 pac_generic_keys_to_user(&user_keys, keys); 1397 1398 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1399 } 1400 1401 static int pac_generic_keys_set(struct task_struct *target, 1402 const struct user_regset *regset, 1403 unsigned int pos, unsigned int count, 1404 const void *kbuf, const void __user *ubuf) 1405 { 1406 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1407 struct user_pac_generic_keys user_keys; 1408 int ret; 1409 1410 if (!system_supports_generic_auth()) 1411 return -EINVAL; 1412 1413 pac_generic_keys_to_user(&user_keys, keys); 1414 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1415 &user_keys, 0, -1); 1416 if (ret) 1417 return ret; 1418 pac_generic_keys_from_user(keys, &user_keys); 1419 1420 return 0; 1421 } 1422 #endif /* CONFIG_CHECKPOINT_RESTORE */ 1423 #endif /* CONFIG_ARM64_PTR_AUTH */ 1424 1425 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1426 static int tagged_addr_ctrl_get(struct task_struct *target, 1427 const struct user_regset *regset, 1428 struct membuf to) 1429 { 1430 long ctrl = get_tagged_addr_ctrl(target); 1431 1432 if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) 1433 return ctrl; 1434 1435 return membuf_write(&to, &ctrl, sizeof(ctrl)); 1436 } 1437 1438 static int tagged_addr_ctrl_set(struct task_struct *target, const struct 1439 user_regset *regset, unsigned int pos, 1440 unsigned int count, const void *kbuf, const 1441 void __user *ubuf) 1442 { 1443 int ret; 1444 long ctrl; 1445 1446 ctrl = get_tagged_addr_ctrl(target); 1447 if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) 1448 return ctrl; 1449 1450 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1451 if (ret) 1452 return ret; 1453 1454 return set_tagged_addr_ctrl(target, ctrl); 1455 } 1456 #endif 1457 1458 #ifdef CONFIG_ARM64_POE 1459 static int poe_get(struct task_struct *target, 1460 const struct user_regset *regset, 1461 struct membuf to) 1462 { 1463 if (!system_supports_poe()) 1464 return -EINVAL; 1465 1466 return membuf_write(&to, &target->thread.por_el0, 1467 sizeof(target->thread.por_el0)); 1468 } 1469 1470 static int poe_set(struct task_struct *target, const struct 1471 user_regset *regset, unsigned int pos, 1472 unsigned int count, const void *kbuf, const 1473 void __user *ubuf) 1474 { 1475 int ret; 1476 long ctrl; 1477 1478 if (!system_supports_poe()) 1479 return -EINVAL; 1480 1481 ctrl = target->thread.por_el0; 1482 1483 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1484 if (ret) 1485 return ret; 1486 1487 target->thread.por_el0 = ctrl; 1488 1489 return 0; 1490 } 1491 #endif 1492 1493 #ifdef CONFIG_ARM64_GCS 1494 static void task_gcs_to_user(struct user_gcs *user_gcs, 1495 const struct task_struct *target) 1496 { 1497 user_gcs->features_enabled = target->thread.gcs_el0_mode; 1498 user_gcs->features_locked = target->thread.gcs_el0_locked; 1499 user_gcs->gcspr_el0 = target->thread.gcspr_el0; 1500 } 1501 1502 static void task_gcs_from_user(struct task_struct *target, 1503 const struct user_gcs *user_gcs) 1504 { 1505 target->thread.gcs_el0_mode = user_gcs->features_enabled; 1506 target->thread.gcs_el0_locked = user_gcs->features_locked; 1507 target->thread.gcspr_el0 = user_gcs->gcspr_el0; 1508 } 1509 1510 static int gcs_get(struct task_struct *target, 1511 const struct user_regset *regset, 1512 struct membuf to) 1513 { 1514 struct user_gcs user_gcs; 1515 1516 if (!system_supports_gcs()) 1517 return -EINVAL; 1518 1519 if (target == current) 1520 gcs_preserve_current_state(); 1521 1522 task_gcs_to_user(&user_gcs, target); 1523 1524 return membuf_write(&to, &user_gcs, sizeof(user_gcs)); 1525 } 1526 1527 static int gcs_set(struct task_struct *target, const struct 1528 user_regset *regset, unsigned int pos, 1529 unsigned int count, const void *kbuf, const 1530 void __user *ubuf) 1531 { 1532 int ret; 1533 struct user_gcs user_gcs; 1534 1535 if (!system_supports_gcs()) 1536 return -EINVAL; 1537 1538 task_gcs_to_user(&user_gcs, target); 1539 1540 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1); 1541 if (ret) 1542 return ret; 1543 1544 if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK) 1545 return -EINVAL; 1546 1547 task_gcs_from_user(target, &user_gcs); 1548 1549 return 0; 1550 } 1551 #endif 1552 1553 enum aarch64_regset { 1554 REGSET_GPR, 1555 REGSET_FPR, 1556 REGSET_TLS, 1557 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1558 REGSET_HW_BREAK, 1559 REGSET_HW_WATCH, 1560 #endif 1561 REGSET_FPMR, 1562 REGSET_SYSTEM_CALL, 1563 #ifdef CONFIG_ARM64_SVE 1564 REGSET_SVE, 1565 #endif 1566 #ifdef CONFIG_ARM64_SME 1567 REGSET_SSVE, 1568 REGSET_ZA, 1569 REGSET_ZT, 1570 #endif 1571 #ifdef CONFIG_ARM64_PTR_AUTH 1572 REGSET_PAC_MASK, 1573 REGSET_PAC_ENABLED_KEYS, 1574 #ifdef CONFIG_CHECKPOINT_RESTORE 1575 REGSET_PACA_KEYS, 1576 REGSET_PACG_KEYS, 1577 #endif 1578 #endif 1579 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1580 REGSET_TAGGED_ADDR_CTRL, 1581 #endif 1582 #ifdef CONFIG_ARM64_POE 1583 REGSET_POE, 1584 #endif 1585 #ifdef CONFIG_ARM64_GCS 1586 REGSET_GCS, 1587 #endif 1588 }; 1589 1590 static const struct user_regset aarch64_regsets[] = { 1591 [REGSET_GPR] = { 1592 .core_note_type = NT_PRSTATUS, 1593 .n = sizeof(struct user_pt_regs) / sizeof(u64), 1594 .size = sizeof(u64), 1595 .align = sizeof(u64), 1596 .regset_get = gpr_get, 1597 .set = gpr_set 1598 }, 1599 [REGSET_FPR] = { 1600 .core_note_type = NT_PRFPREG, 1601 .n = sizeof(struct user_fpsimd_state) / sizeof(u32), 1602 /* 1603 * We pretend we have 32-bit registers because the fpsr and 1604 * fpcr are 32-bits wide. 1605 */ 1606 .size = sizeof(u32), 1607 .align = sizeof(u32), 1608 .active = fpr_active, 1609 .regset_get = fpr_get, 1610 .set = fpr_set 1611 }, 1612 [REGSET_TLS] = { 1613 .core_note_type = NT_ARM_TLS, 1614 .n = 2, 1615 .size = sizeof(void *), 1616 .align = sizeof(void *), 1617 .regset_get = tls_get, 1618 .set = tls_set, 1619 }, 1620 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1621 [REGSET_HW_BREAK] = { 1622 .core_note_type = NT_ARM_HW_BREAK, 1623 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1624 .size = sizeof(u32), 1625 .align = sizeof(u32), 1626 .regset_get = hw_break_get, 1627 .set = hw_break_set, 1628 }, 1629 [REGSET_HW_WATCH] = { 1630 .core_note_type = NT_ARM_HW_WATCH, 1631 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1632 .size = sizeof(u32), 1633 .align = sizeof(u32), 1634 .regset_get = hw_break_get, 1635 .set = hw_break_set, 1636 }, 1637 #endif 1638 [REGSET_SYSTEM_CALL] = { 1639 .core_note_type = NT_ARM_SYSTEM_CALL, 1640 .n = 1, 1641 .size = sizeof(int), 1642 .align = sizeof(int), 1643 .regset_get = system_call_get, 1644 .set = system_call_set, 1645 }, 1646 [REGSET_FPMR] = { 1647 .core_note_type = NT_ARM_FPMR, 1648 .n = 1, 1649 .size = sizeof(u64), 1650 .align = sizeof(u64), 1651 .regset_get = fpmr_get, 1652 .set = fpmr_set, 1653 }, 1654 #ifdef CONFIG_ARM64_SVE 1655 [REGSET_SVE] = { /* Scalable Vector Extension */ 1656 .core_note_type = NT_ARM_SVE, 1657 .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX, 1658 SVE_PT_REGS_SVE), 1659 SVE_VQ_BYTES), 1660 .size = SVE_VQ_BYTES, 1661 .align = SVE_VQ_BYTES, 1662 .regset_get = sve_get, 1663 .set = sve_set, 1664 }, 1665 #endif 1666 #ifdef CONFIG_ARM64_SME 1667 [REGSET_SSVE] = { /* Streaming mode SVE */ 1668 .core_note_type = NT_ARM_SSVE, 1669 .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), 1670 SVE_VQ_BYTES), 1671 .size = SVE_VQ_BYTES, 1672 .align = SVE_VQ_BYTES, 1673 .regset_get = ssve_get, 1674 .set = ssve_set, 1675 }, 1676 [REGSET_ZA] = { /* SME ZA */ 1677 .core_note_type = NT_ARM_ZA, 1678 /* 1679 * ZA is a single register but it's variably sized and 1680 * the ptrace core requires that the size of any data 1681 * be an exact multiple of the configured register 1682 * size so report as though we had SVE_VQ_BYTES 1683 * registers. These values aren't exposed to 1684 * userspace. 1685 */ 1686 .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), 1687 .size = SVE_VQ_BYTES, 1688 .align = SVE_VQ_BYTES, 1689 .regset_get = za_get, 1690 .set = za_set, 1691 }, 1692 [REGSET_ZT] = { /* SME ZT */ 1693 .core_note_type = NT_ARM_ZT, 1694 .n = 1, 1695 .size = ZT_SIG_REG_BYTES, 1696 .align = sizeof(u64), 1697 .regset_get = zt_get, 1698 .set = zt_set, 1699 }, 1700 #endif 1701 #ifdef CONFIG_ARM64_PTR_AUTH 1702 [REGSET_PAC_MASK] = { 1703 .core_note_type = NT_ARM_PAC_MASK, 1704 .n = sizeof(struct user_pac_mask) / sizeof(u64), 1705 .size = sizeof(u64), 1706 .align = sizeof(u64), 1707 .regset_get = pac_mask_get, 1708 /* this cannot be set dynamically */ 1709 }, 1710 [REGSET_PAC_ENABLED_KEYS] = { 1711 .core_note_type = NT_ARM_PAC_ENABLED_KEYS, 1712 .n = 1, 1713 .size = sizeof(long), 1714 .align = sizeof(long), 1715 .regset_get = pac_enabled_keys_get, 1716 .set = pac_enabled_keys_set, 1717 }, 1718 #ifdef CONFIG_CHECKPOINT_RESTORE 1719 [REGSET_PACA_KEYS] = { 1720 .core_note_type = NT_ARM_PACA_KEYS, 1721 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), 1722 .size = sizeof(__uint128_t), 1723 .align = sizeof(__uint128_t), 1724 .regset_get = pac_address_keys_get, 1725 .set = pac_address_keys_set, 1726 }, 1727 [REGSET_PACG_KEYS] = { 1728 .core_note_type = NT_ARM_PACG_KEYS, 1729 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), 1730 .size = sizeof(__uint128_t), 1731 .align = sizeof(__uint128_t), 1732 .regset_get = pac_generic_keys_get, 1733 .set = pac_generic_keys_set, 1734 }, 1735 #endif 1736 #endif 1737 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1738 [REGSET_TAGGED_ADDR_CTRL] = { 1739 .core_note_type = NT_ARM_TAGGED_ADDR_CTRL, 1740 .n = 1, 1741 .size = sizeof(long), 1742 .align = sizeof(long), 1743 .regset_get = tagged_addr_ctrl_get, 1744 .set = tagged_addr_ctrl_set, 1745 }, 1746 #endif 1747 #ifdef CONFIG_ARM64_POE 1748 [REGSET_POE] = { 1749 .core_note_type = NT_ARM_POE, 1750 .n = 1, 1751 .size = sizeof(long), 1752 .align = sizeof(long), 1753 .regset_get = poe_get, 1754 .set = poe_set, 1755 }, 1756 #endif 1757 #ifdef CONFIG_ARM64_GCS 1758 [REGSET_GCS] = { 1759 .core_note_type = NT_ARM_GCS, 1760 .n = sizeof(struct user_gcs) / sizeof(u64), 1761 .size = sizeof(u64), 1762 .align = sizeof(u64), 1763 .regset_get = gcs_get, 1764 .set = gcs_set, 1765 }, 1766 #endif 1767 }; 1768 1769 static const struct user_regset_view user_aarch64_view = { 1770 .name = "aarch64", .e_machine = EM_AARCH64, 1771 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) 1772 }; 1773 1774 enum compat_regset { 1775 REGSET_COMPAT_GPR, 1776 REGSET_COMPAT_VFP, 1777 }; 1778 1779 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) 1780 { 1781 struct pt_regs *regs = task_pt_regs(task); 1782 1783 switch (idx) { 1784 case 15: 1785 return regs->pc; 1786 case 16: 1787 return pstate_to_compat_psr(regs->pstate); 1788 case 17: 1789 return regs->orig_x0; 1790 default: 1791 return regs->regs[idx]; 1792 } 1793 } 1794 1795 static int compat_gpr_get(struct task_struct *target, 1796 const struct user_regset *regset, 1797 struct membuf to) 1798 { 1799 int i = 0; 1800 1801 while (to.left) 1802 membuf_store(&to, compat_get_user_reg(target, i++)); 1803 return 0; 1804 } 1805 1806 static int compat_gpr_set(struct task_struct *target, 1807 const struct user_regset *regset, 1808 unsigned int pos, unsigned int count, 1809 const void *kbuf, const void __user *ubuf) 1810 { 1811 struct pt_regs newregs; 1812 int ret = 0; 1813 unsigned int i, start, num_regs; 1814 1815 /* Calculate the number of AArch32 registers contained in count */ 1816 num_regs = count / regset->size; 1817 1818 /* Convert pos into an register number */ 1819 start = pos / regset->size; 1820 1821 if (start + num_regs > regset->n) 1822 return -EIO; 1823 1824 newregs = *task_pt_regs(target); 1825 1826 for (i = 0; i < num_regs; ++i) { 1827 unsigned int idx = start + i; 1828 compat_ulong_t reg; 1829 1830 if (kbuf) { 1831 memcpy(®, kbuf, sizeof(reg)); 1832 kbuf += sizeof(reg); 1833 } else { 1834 ret = copy_from_user(®, ubuf, sizeof(reg)); 1835 if (ret) { 1836 ret = -EFAULT; 1837 break; 1838 } 1839 1840 ubuf += sizeof(reg); 1841 } 1842 1843 switch (idx) { 1844 case 15: 1845 newregs.pc = reg; 1846 break; 1847 case 16: 1848 reg = compat_psr_to_pstate(reg); 1849 newregs.pstate = reg; 1850 break; 1851 case 17: 1852 newregs.orig_x0 = reg; 1853 break; 1854 default: 1855 newregs.regs[idx] = reg; 1856 } 1857 1858 } 1859 1860 if (valid_user_regs(&newregs.user_regs, target)) 1861 *task_pt_regs(target) = newregs; 1862 else 1863 ret = -EINVAL; 1864 1865 return ret; 1866 } 1867 1868 static int compat_vfp_get(struct task_struct *target, 1869 const struct user_regset *regset, 1870 struct membuf to) 1871 { 1872 struct user_fpsimd_state *uregs; 1873 compat_ulong_t fpscr; 1874 1875 if (!system_supports_fpsimd()) 1876 return -EINVAL; 1877 1878 uregs = &target->thread.uw.fpsimd_state; 1879 1880 if (target == current) 1881 fpsimd_preserve_current_state(); 1882 1883 /* 1884 * The VFP registers are packed into the fpsimd_state, so they all sit 1885 * nicely together for us. We just need to create the fpscr separately. 1886 */ 1887 membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t)); 1888 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | 1889 (uregs->fpcr & VFP_FPSCR_CTRL_MASK); 1890 return membuf_store(&to, fpscr); 1891 } 1892 1893 static int compat_vfp_set(struct task_struct *target, 1894 const struct user_regset *regset, 1895 unsigned int pos, unsigned int count, 1896 const void *kbuf, const void __user *ubuf) 1897 { 1898 struct user_fpsimd_state *uregs; 1899 compat_ulong_t fpscr; 1900 int ret, vregs_end_pos; 1901 1902 if (!system_supports_fpsimd()) 1903 return -EINVAL; 1904 1905 uregs = &target->thread.uw.fpsimd_state; 1906 1907 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); 1908 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, 1909 vregs_end_pos); 1910 1911 if (count && !ret) { 1912 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, 1913 vregs_end_pos, VFP_STATE_SIZE); 1914 if (!ret) { 1915 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; 1916 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; 1917 } 1918 } 1919 1920 fpsimd_flush_task_state(target); 1921 return ret; 1922 } 1923 1924 static int compat_tls_get(struct task_struct *target, 1925 const struct user_regset *regset, 1926 struct membuf to) 1927 { 1928 return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value); 1929 } 1930 1931 static int compat_tls_set(struct task_struct *target, 1932 const struct user_regset *regset, unsigned int pos, 1933 unsigned int count, const void *kbuf, 1934 const void __user *ubuf) 1935 { 1936 int ret; 1937 compat_ulong_t tls = target->thread.uw.tp_value; 1938 1939 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 1940 if (ret) 1941 return ret; 1942 1943 target->thread.uw.tp_value = tls; 1944 return ret; 1945 } 1946 1947 static const struct user_regset aarch32_regsets[] = { 1948 [REGSET_COMPAT_GPR] = { 1949 .core_note_type = NT_PRSTATUS, 1950 .n = COMPAT_ELF_NGREG, 1951 .size = sizeof(compat_elf_greg_t), 1952 .align = sizeof(compat_elf_greg_t), 1953 .regset_get = compat_gpr_get, 1954 .set = compat_gpr_set 1955 }, 1956 [REGSET_COMPAT_VFP] = { 1957 .core_note_type = NT_ARM_VFP, 1958 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 1959 .size = sizeof(compat_ulong_t), 1960 .align = sizeof(compat_ulong_t), 1961 .active = fpr_active, 1962 .regset_get = compat_vfp_get, 1963 .set = compat_vfp_set 1964 }, 1965 }; 1966 1967 static const struct user_regset_view user_aarch32_view = { 1968 .name = "aarch32", .e_machine = EM_ARM, 1969 .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) 1970 }; 1971 1972 static const struct user_regset aarch32_ptrace_regsets[] = { 1973 [REGSET_GPR] = { 1974 .core_note_type = NT_PRSTATUS, 1975 .n = COMPAT_ELF_NGREG, 1976 .size = sizeof(compat_elf_greg_t), 1977 .align = sizeof(compat_elf_greg_t), 1978 .regset_get = compat_gpr_get, 1979 .set = compat_gpr_set 1980 }, 1981 [REGSET_FPR] = { 1982 .core_note_type = NT_ARM_VFP, 1983 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 1984 .size = sizeof(compat_ulong_t), 1985 .align = sizeof(compat_ulong_t), 1986 .regset_get = compat_vfp_get, 1987 .set = compat_vfp_set 1988 }, 1989 [REGSET_TLS] = { 1990 .core_note_type = NT_ARM_TLS, 1991 .n = 1, 1992 .size = sizeof(compat_ulong_t), 1993 .align = sizeof(compat_ulong_t), 1994 .regset_get = compat_tls_get, 1995 .set = compat_tls_set, 1996 }, 1997 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1998 [REGSET_HW_BREAK] = { 1999 .core_note_type = NT_ARM_HW_BREAK, 2000 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 2001 .size = sizeof(u32), 2002 .align = sizeof(u32), 2003 .regset_get = hw_break_get, 2004 .set = hw_break_set, 2005 }, 2006 [REGSET_HW_WATCH] = { 2007 .core_note_type = NT_ARM_HW_WATCH, 2008 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 2009 .size = sizeof(u32), 2010 .align = sizeof(u32), 2011 .regset_get = hw_break_get, 2012 .set = hw_break_set, 2013 }, 2014 #endif 2015 [REGSET_SYSTEM_CALL] = { 2016 .core_note_type = NT_ARM_SYSTEM_CALL, 2017 .n = 1, 2018 .size = sizeof(int), 2019 .align = sizeof(int), 2020 .regset_get = system_call_get, 2021 .set = system_call_set, 2022 }, 2023 }; 2024 2025 static const struct user_regset_view user_aarch32_ptrace_view = { 2026 .name = "aarch32", .e_machine = EM_ARM, 2027 .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) 2028 }; 2029 2030 #ifdef CONFIG_COMPAT 2031 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, 2032 compat_ulong_t __user *ret) 2033 { 2034 compat_ulong_t tmp; 2035 2036 if (off & 3) 2037 return -EIO; 2038 2039 if (off == COMPAT_PT_TEXT_ADDR) 2040 tmp = tsk->mm->start_code; 2041 else if (off == COMPAT_PT_DATA_ADDR) 2042 tmp = tsk->mm->start_data; 2043 else if (off == COMPAT_PT_TEXT_END_ADDR) 2044 tmp = tsk->mm->end_code; 2045 else if (off < sizeof(compat_elf_gregset_t)) 2046 tmp = compat_get_user_reg(tsk, off >> 2); 2047 else if (off >= COMPAT_USER_SZ) 2048 return -EIO; 2049 else 2050 tmp = 0; 2051 2052 return put_user(tmp, ret); 2053 } 2054 2055 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, 2056 compat_ulong_t val) 2057 { 2058 struct pt_regs newregs = *task_pt_regs(tsk); 2059 unsigned int idx = off / 4; 2060 2061 if (off & 3 || off >= COMPAT_USER_SZ) 2062 return -EIO; 2063 2064 if (off >= sizeof(compat_elf_gregset_t)) 2065 return 0; 2066 2067 switch (idx) { 2068 case 15: 2069 newregs.pc = val; 2070 break; 2071 case 16: 2072 newregs.pstate = compat_psr_to_pstate(val); 2073 break; 2074 case 17: 2075 newregs.orig_x0 = val; 2076 break; 2077 default: 2078 newregs.regs[idx] = val; 2079 } 2080 2081 if (!valid_user_regs(&newregs.user_regs, tsk)) 2082 return -EINVAL; 2083 2084 *task_pt_regs(tsk) = newregs; 2085 return 0; 2086 } 2087 2088 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2089 2090 /* 2091 * Convert a virtual register number into an index for a thread_info 2092 * breakpoint array. Breakpoints are identified using positive numbers 2093 * whilst watchpoints are negative. The registers are laid out as pairs 2094 * of (address, control), each pair mapping to a unique hw_breakpoint struct. 2095 * Register 0 is reserved for describing resource information. 2096 */ 2097 static int compat_ptrace_hbp_num_to_idx(compat_long_t num) 2098 { 2099 return (abs(num) - 1) >> 1; 2100 } 2101 2102 static int compat_ptrace_hbp_get_resource_info(u32 *kdata) 2103 { 2104 u8 num_brps, num_wrps, debug_arch, wp_len; 2105 u32 reg = 0; 2106 2107 num_brps = hw_breakpoint_slots(TYPE_INST); 2108 num_wrps = hw_breakpoint_slots(TYPE_DATA); 2109 2110 debug_arch = debug_monitors_arch(); 2111 wp_len = 8; 2112 reg |= debug_arch; 2113 reg <<= 8; 2114 reg |= wp_len; 2115 reg <<= 8; 2116 reg |= num_wrps; 2117 reg <<= 8; 2118 reg |= num_brps; 2119 2120 *kdata = reg; 2121 return 0; 2122 } 2123 2124 static int compat_ptrace_hbp_get(unsigned int note_type, 2125 struct task_struct *tsk, 2126 compat_long_t num, 2127 u32 *kdata) 2128 { 2129 u64 addr = 0; 2130 u32 ctrl = 0; 2131 2132 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2133 2134 if (num & 1) { 2135 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); 2136 *kdata = (u32)addr; 2137 } else { 2138 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); 2139 *kdata = ctrl; 2140 } 2141 2142 return err; 2143 } 2144 2145 static int compat_ptrace_hbp_set(unsigned int note_type, 2146 struct task_struct *tsk, 2147 compat_long_t num, 2148 u32 *kdata) 2149 { 2150 u64 addr; 2151 u32 ctrl; 2152 2153 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2154 2155 if (num & 1) { 2156 addr = *kdata; 2157 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); 2158 } else { 2159 ctrl = *kdata; 2160 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); 2161 } 2162 2163 return err; 2164 } 2165 2166 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, 2167 compat_ulong_t __user *data) 2168 { 2169 int ret; 2170 u32 kdata; 2171 2172 /* Watchpoint */ 2173 if (num < 0) { 2174 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); 2175 /* Resource info */ 2176 } else if (num == 0) { 2177 ret = compat_ptrace_hbp_get_resource_info(&kdata); 2178 /* Breakpoint */ 2179 } else { 2180 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 2181 } 2182 2183 if (!ret) 2184 ret = put_user(kdata, data); 2185 2186 return ret; 2187 } 2188 2189 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, 2190 compat_ulong_t __user *data) 2191 { 2192 int ret; 2193 u32 kdata = 0; 2194 2195 if (num == 0) 2196 return 0; 2197 2198 ret = get_user(kdata, data); 2199 if (ret) 2200 return ret; 2201 2202 if (num < 0) 2203 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 2204 else 2205 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 2206 2207 return ret; 2208 } 2209 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 2210 2211 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 2212 compat_ulong_t caddr, compat_ulong_t cdata) 2213 { 2214 unsigned long addr = caddr; 2215 unsigned long data = cdata; 2216 void __user *datap = compat_ptr(data); 2217 int ret; 2218 2219 switch (request) { 2220 case PTRACE_PEEKUSR: 2221 ret = compat_ptrace_read_user(child, addr, datap); 2222 break; 2223 2224 case PTRACE_POKEUSR: 2225 ret = compat_ptrace_write_user(child, addr, data); 2226 break; 2227 2228 case COMPAT_PTRACE_GETREGS: 2229 ret = copy_regset_to_user(child, 2230 &user_aarch32_view, 2231 REGSET_COMPAT_GPR, 2232 0, sizeof(compat_elf_gregset_t), 2233 datap); 2234 break; 2235 2236 case COMPAT_PTRACE_SETREGS: 2237 ret = copy_regset_from_user(child, 2238 &user_aarch32_view, 2239 REGSET_COMPAT_GPR, 2240 0, sizeof(compat_elf_gregset_t), 2241 datap); 2242 break; 2243 2244 case COMPAT_PTRACE_GET_THREAD_AREA: 2245 ret = put_user((compat_ulong_t)child->thread.uw.tp_value, 2246 (compat_ulong_t __user *)datap); 2247 break; 2248 2249 case COMPAT_PTRACE_SET_SYSCALL: 2250 task_pt_regs(child)->syscallno = data; 2251 ret = 0; 2252 break; 2253 2254 case COMPAT_PTRACE_GETVFPREGS: 2255 ret = copy_regset_to_user(child, 2256 &user_aarch32_view, 2257 REGSET_COMPAT_VFP, 2258 0, VFP_STATE_SIZE, 2259 datap); 2260 break; 2261 2262 case COMPAT_PTRACE_SETVFPREGS: 2263 ret = copy_regset_from_user(child, 2264 &user_aarch32_view, 2265 REGSET_COMPAT_VFP, 2266 0, VFP_STATE_SIZE, 2267 datap); 2268 break; 2269 2270 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2271 case COMPAT_PTRACE_GETHBPREGS: 2272 ret = compat_ptrace_gethbpregs(child, addr, datap); 2273 break; 2274 2275 case COMPAT_PTRACE_SETHBPREGS: 2276 ret = compat_ptrace_sethbpregs(child, addr, datap); 2277 break; 2278 #endif 2279 2280 default: 2281 ret = compat_ptrace_request(child, request, addr, 2282 data); 2283 break; 2284 } 2285 2286 return ret; 2287 } 2288 #endif /* CONFIG_COMPAT */ 2289 2290 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 2291 { 2292 /* 2293 * Core dumping of 32-bit tasks or compat ptrace requests must use the 2294 * user_aarch32_view compatible with arm32. Native ptrace requests on 2295 * 32-bit children use an extended user_aarch32_ptrace_view to allow 2296 * access to the TLS register. 2297 */ 2298 if (is_compat_task()) 2299 return &user_aarch32_view; 2300 else if (is_compat_thread(task_thread_info(task))) 2301 return &user_aarch32_ptrace_view; 2302 2303 return &user_aarch64_view; 2304 } 2305 2306 long arch_ptrace(struct task_struct *child, long request, 2307 unsigned long addr, unsigned long data) 2308 { 2309 switch (request) { 2310 case PTRACE_PEEKMTETAGS: 2311 case PTRACE_POKEMTETAGS: 2312 return mte_ptrace_copy_tags(child, request, addr, data); 2313 } 2314 2315 return ptrace_request(child, request, addr, data); 2316 } 2317 2318 enum ptrace_syscall_dir { 2319 PTRACE_SYSCALL_ENTER = 0, 2320 PTRACE_SYSCALL_EXIT, 2321 }; 2322 2323 static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) 2324 { 2325 int regno; 2326 unsigned long saved_reg; 2327 2328 /* 2329 * We have some ABI weirdness here in the way that we handle syscall 2330 * exit stops because we indicate whether or not the stop has been 2331 * signalled from syscall entry or syscall exit by clobbering a general 2332 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee 2333 * and restoring its old value after the stop. This means that: 2334 * 2335 * - Any writes by the tracer to this register during the stop are 2336 * ignored/discarded. 2337 * 2338 * - The actual value of the register is not available during the stop, 2339 * so the tracer cannot save it and restore it later. 2340 * 2341 * - Syscall stops behave differently to seccomp and pseudo-step traps 2342 * (the latter do not nobble any registers). 2343 */ 2344 regno = (is_compat_task() ? 12 : 7); 2345 saved_reg = regs->regs[regno]; 2346 regs->regs[regno] = dir; 2347 2348 if (dir == PTRACE_SYSCALL_ENTER) { 2349 if (ptrace_report_syscall_entry(regs)) 2350 forget_syscall(regs); 2351 regs->regs[regno] = saved_reg; 2352 } else if (!test_thread_flag(TIF_SINGLESTEP)) { 2353 ptrace_report_syscall_exit(regs, 0); 2354 regs->regs[regno] = saved_reg; 2355 } else { 2356 regs->regs[regno] = saved_reg; 2357 2358 /* 2359 * Signal a pseudo-step exception since we are stepping but 2360 * tracer modifications to the registers may have rewound the 2361 * state machine. 2362 */ 2363 ptrace_report_syscall_exit(regs, 1); 2364 } 2365 } 2366 2367 int syscall_trace_enter(struct pt_regs *regs) 2368 { 2369 unsigned long flags = read_thread_flags(); 2370 2371 if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { 2372 report_syscall(regs, PTRACE_SYSCALL_ENTER); 2373 if (flags & _TIF_SYSCALL_EMU) 2374 return NO_SYSCALL; 2375 } 2376 2377 /* Do the secure computing after ptrace; failures should be fast. */ 2378 if (secure_computing() == -1) 2379 return NO_SYSCALL; 2380 2381 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 2382 trace_sys_enter(regs, regs->syscallno); 2383 2384 audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1], 2385 regs->regs[2], regs->regs[3]); 2386 2387 return regs->syscallno; 2388 } 2389 2390 void syscall_trace_exit(struct pt_regs *regs) 2391 { 2392 unsigned long flags = read_thread_flags(); 2393 2394 audit_syscall_exit(regs); 2395 2396 if (flags & _TIF_SYSCALL_TRACEPOINT) 2397 trace_sys_exit(regs, syscall_get_return_value(current, regs)); 2398 2399 if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) 2400 report_syscall(regs, PTRACE_SYSCALL_EXIT); 2401 2402 rseq_syscall(regs); 2403 } 2404 2405 /* 2406 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. 2407 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is 2408 * not described in ARM DDI 0487D.a. 2409 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may 2410 * be allocated an EL0 meaning in future. 2411 * Userspace cannot use these until they have an architectural meaning. 2412 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 2413 * We also reserve IL for the kernel; SS is handled dynamically. 2414 */ 2415 #define SPSR_EL1_AARCH64_RES0_BITS \ 2416 (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ 2417 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) 2418 #define SPSR_EL1_AARCH32_RES0_BITS \ 2419 (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) 2420 2421 static int valid_compat_regs(struct user_pt_regs *regs) 2422 { 2423 regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; 2424 2425 if (!system_supports_mixed_endian_el0()) { 2426 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 2427 regs->pstate |= PSR_AA32_E_BIT; 2428 else 2429 regs->pstate &= ~PSR_AA32_E_BIT; 2430 } 2431 2432 if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && 2433 (regs->pstate & PSR_AA32_A_BIT) == 0 && 2434 (regs->pstate & PSR_AA32_I_BIT) == 0 && 2435 (regs->pstate & PSR_AA32_F_BIT) == 0) { 2436 return 1; 2437 } 2438 2439 /* 2440 * Force PSR to a valid 32-bit EL0t, preserving the same bits as 2441 * arch/arm. 2442 */ 2443 regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | 2444 PSR_AA32_C_BIT | PSR_AA32_V_BIT | 2445 PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | 2446 PSR_AA32_GE_MASK | PSR_AA32_E_BIT | 2447 PSR_AA32_T_BIT; 2448 regs->pstate |= PSR_MODE32_BIT; 2449 2450 return 0; 2451 } 2452 2453 static int valid_native_regs(struct user_pt_regs *regs) 2454 { 2455 regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; 2456 2457 if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && 2458 (regs->pstate & PSR_D_BIT) == 0 && 2459 (regs->pstate & PSR_A_BIT) == 0 && 2460 (regs->pstate & PSR_I_BIT) == 0 && 2461 (regs->pstate & PSR_F_BIT) == 0) { 2462 return 1; 2463 } 2464 2465 /* Force PSR to a valid 64-bit EL0t */ 2466 regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; 2467 2468 return 0; 2469 } 2470 2471 /* 2472 * Are the current registers suitable for user mode? (used to maintain 2473 * security in signal handlers) 2474 */ 2475 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) 2476 { 2477 /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ 2478 user_regs_reset_single_step(regs, task); 2479 2480 if (is_compat_thread(task_thread_info(task))) 2481 return valid_compat_regs(regs); 2482 else 2483 return valid_native_regs(regs); 2484 } 2485