// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
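/*
 * Illustrative sketch (not part of this file): a kprobes handler might
 * resolve a register by name once, then read it via the offset on each
 * hit. regs_get_register() is declared in <asm/ptrace.h>.
 *
 *	int off = regs_query_register_offset("x2");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */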
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return READ_ONCE_NOCHECK(*addr);
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
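/*
 * Illustrative note: the si_errno encoding above gives an AArch32 debugger
 * the register number that fired, e.g.
 *
 *	breakpoint slot 0 -> si_errno  1, slot 1 -> si_errno  3
 *	watchpoint slot 0 -> si_errno -1, slot 1 -> si_errno -3
 *
 * matching the (address, control) pair numbering used by
 * compat_ptrace_hbp_num_to_idx() later in this file.
 */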
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
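/*
 * Worked example (illustrative): with 6 instruction breakpoint slots and a
 * debug architecture ID of 0x6 (assumed here for ARMv8 debug), the word
 * built above is
 *
 *	reg = (0x6 << 8) | 6 = 0x0606
 *
 * i.e. byte 1 carries debug_monitors_arch() and byte 0 the slot count.
 */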
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
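/*
 * Layout sketch for reference (mirrors struct user_hwdebug_state in the
 * uapi <asm/ptrace.h>), which hw_break_get() above fills and
 * hw_break_set() below consumes:
 *
 *	__u32 dbg_info;		// resource word from ptrace_hbp_get_resource_info()
 *	__u32 pad;
 *	struct {
 *		__u64 addr;
 *		__u32 ctrl;
 *		__u32 pad;
 *	} dbg_regs[];
 */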
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
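/*
 * Tracer-side sketch (illustrative, assuming a stopped tracee): the GPR
 * regset above is what userspace reaches via PTRACE_GETREGSET with
 * NT_PRSTATUS:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	// regs.regs[0..30], regs.sp, regs.pc, regs.pstate are now valid;
 *	// the kernel updates iov.iov_len to the amount actually written.
 */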
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	fpsimd_sync_from_effective_state(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	fpsimd_sync_from_effective_state(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	fpsimd_sync_to_effective_state_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_fpmr())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);
}

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long fpmr;

	if (!system_supports_fpmr())
		return -EINVAL;

	fpmr = target->thread.uw.fpmr;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

	return 0;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
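/*
 * Tracer-side sketch (illustrative): NT_ARM_SYSTEM_CALL lets a debugger
 * rewrite the syscall number at a syscall-entry stop, e.g. replacing it
 * with -1 to skip the syscall entirely:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */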
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	if (active && target->thread.fp_type == FP_STATE_SVE)
		header->flags = SVE_PT_REGS_SVE;
	else
		header->flags = SVE_PT_REGS_FPSIMD;

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	if (active)
		header->size = SVE_PT_SIZE(vq, header->flags);
	else
		header->size = sizeof(*header);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
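/*
 * Worked example (illustrative): vector lengths are in bytes and vq counts
 * 128-bit quadwords, so sve_vq_from_vl(32) == 2. An active SVE task with
 * VL 32 and SVE-format data thus reports
 * header->size == SVE_PT_SIZE(2, SVE_PT_REGS_SVE), while an inactive
 * vector type reports only the 16-byte header.
 */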
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	/*
	 * When the requested vector type is not active, do not present data
	 * from the other mode to userspace.
	 */
	if (header.size == sizeof(header))
		return 0;

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		BUILD_BUG();
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;
	bool fpsimd;

	fpsimd_flush_task_state(target);

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		return ret;

	/*
	 * Streaming SVE data is always stored and presented in SVE format.
	 * Require the user to provide SVE formatted data for consistency, and
	 * to avoid the risk that we configure the task into an invalid state.
	 */
	fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD;
	if (fpsimd && type == ARM64_VEC_SME)
		return -EINVAL;

	/*
	 * On systems without SVE we accept FPSIMD format writes with
	 * a VL of 0 to allow exiting streaming mode, otherwise a VL
	 * is required.
	 */
	if (header.vl) {
		/*
		 * If the system does not support SVE we can't
		 * configure a SVE VL.
		 */
		if (!system_supports_sve() && type == ARM64_VEC_SVE)
			return -EINVAL;

		/*
		 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are
		 * consumed by vec_set_vector_length(), which will
		 * also validate them for us:
		 */
		ret = vec_set_vector_length(target, type, header.vl,
			((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
		if (ret)
			return ret;
	} else {
		/* If the system supports SVE we require a VL. */
		if (system_supports_sve())
			return -EINVAL;
		/*
		 * Only FPSIMD formatted data with no flags set is
		 * supported.
		 */
		if (header.flags != SVE_PT_REGS_FPSIMD)
			return -EINVAL;
	}

	/* Allocate SME storage if necessary, preserving any existing ZA/ZT state */
	if (type == ARM64_VEC_SME) {
		sme_alloc(target, false);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	/* Allocate SVE storage if necessary, zeroing any existing SVE state */
	if (!fpsimd) {
		sve_alloc(target, true);
		if (!target->thread.sve_state)
			return -ENOMEM;
	}

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC VL not the
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	switch (type) {
	case ARM64_VEC_SVE:
		target->thread.svcr &= ~SVCR_SM_MASK;
		set_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		target->thread.svcr |= SVCR_SM_MASK;
		set_tsk_thread_flag(target, TIF_SME);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Always zero V regs, FPSR, and FPCR */
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if (fpsimd) {
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		return ret;
	}

	/* Otherwise: no registers or full SVE case. */

	target->thread.fp_type = FP_STATE_SVE;
	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl))
		return -EIO;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve() && !system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}
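/*
 * Tracer-side sketch (illustrative): the SVE/SSVE regsets are variably
 * sized, so userspace usually reads the header first and sizes the full
 * read from it:
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 *	// then allocate SVE_PT_SIZE(sve_vq_from_vl(hdr.vl), hdr.flags)
 *	// bytes and repeat the call with the larger buffer.
 */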
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC rather than
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}
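/*
 * Worked example (illustrative): ZA is an SVL-byte by SVL-byte array, so
 * at a streaming VL of 32 bytes (vq == 2) the payload following the
 * header is 32 * 32 == 1024 bytes and header.size == ZA_PT_SIZE(2); with
 * PSTATE.ZA clear only the header is reported.
 */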
static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}
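/*
 * Worked example (illustrative): the two helpers above are inverses. For
 * key->lo == 0x0123456789abcdef and key->hi == 0xfedcba9876543210,
 * pac_key_to_user() yields the 128-bit value
 * 0xfedcba9876543210_0123456789abcdef, and pac_key_from_user() splits
 * that value back into the same {lo, hi} pair.
 */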
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ctrl = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

#ifdef CONFIG_ARM64_POE
static int poe_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_poe())
		return -EINVAL;

	return membuf_write(&to, &target->thread.por_el0,
			    sizeof(target->thread.por_el0));
}

static int poe_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	if (!system_supports_poe())
		return -EINVAL;

	ctrl = target->thread.por_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	target->thread.por_el0 = ctrl;

	return 0;
}
#endif

#ifdef CONFIG_ARM64_GCS
static void task_gcs_to_user(struct user_gcs *user_gcs,
			     const struct task_struct *target)
{
	user_gcs->features_enabled = target->thread.gcs_el0_mode;
	user_gcs->features_locked = target->thread.gcs_el0_locked;
	user_gcs->gcspr_el0 = target->thread.gcspr_el0;
}

static void task_gcs_from_user(struct task_struct *target,
			       const struct user_gcs *user_gcs)
{
	target->thread.gcs_el0_mode = user_gcs->features_enabled;
	target->thread.gcs_el0_locked = user_gcs->features_locked;
	target->thread.gcspr_el0 = user_gcs->gcspr_el0;
}

static int gcs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	if (target == current)
		gcs_preserve_current_state();

	task_gcs_to_user(&user_gcs, target);

	return membuf_write(&to, &user_gcs, sizeof(user_gcs));
}

static int gcs_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	task_gcs_to_user(&user_gcs, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
	if (ret)
		return ret;

	if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	task_gcs_from_user(target, &user_gcs);

	return 0;
}
#endif
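/*
 * Tracer-side note (assumption: this mirrors the
 * PR_GET/SET_SHADOW_STACK_STATUS prctl() interface):
 * user_gcs.features_enabled takes the same PR_SHADOW_STACK_* bits, and
 * gcs_set() above rejects writes that enable bits outside
 * PR_SHADOW_STACK_SUPPORTED_STATUS_MASK with -EINVAL.
 */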
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_FPMR,
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
#ifdef CONFIG_ARM64_POE
	REGSET_POE,
#endif
#ifdef CONFIG_ARM64_GCS
	REGSET_GCS,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		USER_REGSET_NOTE_TYPE(PRFPREG),
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		USER_REGSET_NOTE_TYPE(ARM_TLS),
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_BREAK),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_WATCH),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL),
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		USER_REGSET_NOTE_TYPE(ARM_FPMR),
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		USER_REGSET_NOTE_TYPE(ARM_SVE),
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		USER_REGSET_NOTE_TYPE(ARM_SSVE),
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
1706 */ 1707 .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), 1708 .size = SVE_VQ_BYTES, 1709 .align = SVE_VQ_BYTES, 1710 .regset_get = za_get, 1711 .set = za_set, 1712 }, 1713 [REGSET_ZT] = { /* SME ZT */ 1714 USER_REGSET_NOTE_TYPE(ARM_ZT), 1715 .n = 1, 1716 .size = ZT_SIG_REG_BYTES, 1717 .align = sizeof(u64), 1718 .regset_get = zt_get, 1719 .set = zt_set, 1720 }, 1721 #endif 1722 #ifdef CONFIG_ARM64_PTR_AUTH 1723 [REGSET_PAC_MASK] = { 1724 USER_REGSET_NOTE_TYPE(ARM_PAC_MASK), 1725 .n = sizeof(struct user_pac_mask) / sizeof(u64), 1726 .size = sizeof(u64), 1727 .align = sizeof(u64), 1728 .regset_get = pac_mask_get, 1729 /* this cannot be set dynamically */ 1730 }, 1731 [REGSET_PAC_ENABLED_KEYS] = { 1732 USER_REGSET_NOTE_TYPE(ARM_PAC_ENABLED_KEYS), 1733 .n = 1, 1734 .size = sizeof(long), 1735 .align = sizeof(long), 1736 .regset_get = pac_enabled_keys_get, 1737 .set = pac_enabled_keys_set, 1738 }, 1739 #ifdef CONFIG_CHECKPOINT_RESTORE 1740 [REGSET_PACA_KEYS] = { 1741 USER_REGSET_NOTE_TYPE(ARM_PACA_KEYS), 1742 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), 1743 .size = sizeof(__uint128_t), 1744 .align = sizeof(__uint128_t), 1745 .regset_get = pac_address_keys_get, 1746 .set = pac_address_keys_set, 1747 }, 1748 [REGSET_PACG_KEYS] = { 1749 USER_REGSET_NOTE_TYPE(ARM_PACG_KEYS), 1750 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), 1751 .size = sizeof(__uint128_t), 1752 .align = sizeof(__uint128_t), 1753 .regset_get = pac_generic_keys_get, 1754 .set = pac_generic_keys_set, 1755 }, 1756 #endif 1757 #endif 1758 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1759 [REGSET_TAGGED_ADDR_CTRL] = { 1760 USER_REGSET_NOTE_TYPE(ARM_TAGGED_ADDR_CTRL), 1761 .n = 1, 1762 .size = sizeof(long), 1763 .align = sizeof(long), 1764 .regset_get = tagged_addr_ctrl_get, 1765 .set = tagged_addr_ctrl_set, 1766 }, 1767 #endif 1768 #ifdef CONFIG_ARM64_POE 1769 [REGSET_POE] = { 1770 USER_REGSET_NOTE_TYPE(ARM_POE), 1771 .n = 1, 1772 .size = sizeof(long), 1773 .align = sizeof(long), 1774 .regset_get = poe_get, 1775 .set = poe_set, 1776 }, 1777 #endif 1778 #ifdef CONFIG_ARM64_GCS 1779 [REGSET_GCS] = { 1780 USER_REGSET_NOTE_TYPE(ARM_GCS), 1781 .n = sizeof(struct user_gcs) / sizeof(u64), 1782 .size = sizeof(u64), 1783 .align = sizeof(u64), 1784 .regset_get = gcs_get, 1785 .set = gcs_set, 1786 }, 1787 #endif 1788 }; 1789 1790 static const struct user_regset_view user_aarch64_view = { 1791 .name = "aarch64", .e_machine = EM_AARCH64, 1792 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) 1793 }; 1794 1795 enum compat_regset { 1796 REGSET_COMPAT_GPR, 1797 REGSET_COMPAT_VFP, 1798 }; 1799 1800 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) 1801 { 1802 struct pt_regs *regs = task_pt_regs(task); 1803 1804 switch (idx) { 1805 case 15: 1806 return regs->pc; 1807 case 16: 1808 return pstate_to_compat_psr(regs->pstate); 1809 case 17: 1810 return regs->orig_x0; 1811 default: 1812 return regs->regs[idx]; 1813 } 1814 } 1815 1816 static int compat_gpr_get(struct task_struct *target, 1817 const struct user_regset *regset, 1818 struct membuf to) 1819 { 1820 int i = 0; 1821 1822 while (to.left) 1823 membuf_store(&to, compat_get_user_reg(target, i++)); 1824 return 0; 1825 } 1826 1827 static int compat_gpr_set(struct task_struct *target, 1828 const struct user_regset *regset, 1829 unsigned int pos, unsigned int count, 1830 const void *kbuf, const void __user *ubuf) 1831 { 1832 struct pt_regs newregs; 1833 int ret = 0; 1834 unsigned int i, start, num_regs; 1835 1836 /* 
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		USER_REGSET_NOTE_TYPE(ARM_VFP),
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		USER_REGSET_NOTE_TYPE(ARM_VFP),
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		USER_REGSET_NOTE_TYPE(ARM_TLS),
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_BREAK),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_WATCH),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL),
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
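/*
 * Worked example (illustrative): PTRACE_PEEKUSR offsets are byte offsets
 * into the AArch32 user area, so off == 0x3c reads virtual register
 * 0x3c >> 2 == 15, i.e. the pc, via compat_get_user_reg(). Offsets past
 * the gregset but below COMPAT_USER_SZ read back as zero for
 * compatibility.
 */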
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
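/*
 * Worked example (illustrative) of the numbering described above:
 *
 *	num  1 -> breakpoint 0 address		num  2 -> breakpoint 0 control
 *	num  3 -> breakpoint 1 address		num -1 -> watchpoint 0 address
 *
 * i.e. idx = (abs(num) - 1) >> 1 selects the slot and the low bit of num
 * selects address (odd) or control (even).
 */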

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
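
/*
 * Illustrative sketch: a tracer using PTRACE_SYSCALL can tell the two stops
 * apart by reading the marker register described above, e.g. for a 64-bit
 * tracee (PTRACE_SYSCALL_ENTER is kernel-internal, so userspace compares
 * against the raw values 0 and 1):
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	if (uregs.regs[7] == 0)
 *		... stopped on syscall entry ...
 *	else
 *		... stopped on syscall exit ...
 *
 * As the comment in report_syscall() notes, anything the tracer writes to
 * x7 (or r12 for AArch32) during the stop is discarded on resume.
 */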

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and
 * may be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
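
/*
 * For illustration: a compat PSTATE that fails the check above (for example
 * one with the A/I/F mask bits set, or a non-user mode) is not left intact;
 * valid_compat_regs() clamps it to a 32-bit EL0t value preserving only the
 * NZCVQ flags, IT/GE state and the E and T bits, and returns 0 so that
 * callers such as compat_ptrace_write_user() can fail the request with
 * -EINVAL.
 */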

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}
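
/*
 * Usage note (illustrative): valid_user_regs() vets tracer-supplied register
 * state in this file; for instance, compat_ptrace_write_user() above rejects
 * a write with -EINVAL whenever it returns 0. The native leg behaves like
 * the compat one, clamping a bad PSTATE to 64-bit EL0t with only the N, Z,
 * C and V flags preserved.
 */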