// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
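/*
 * Sketch of a (hypothetical) caller, e.g. a kprobes fetch routine: resolve
 * a register by name once, then read it from any pt_regs via the cached
 * offset. This is illustrative of how the lookup above is typically
 * consumed, not an API defined in this file:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */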
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return READ_ONCE_NOCHECK(*addr);
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
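/*
 * Worked example of the si_errno encoding above (derived from the loops in
 * ptrace_hbptriggered(), not an additional ABI): breakpoint slot i reports
 * si_errno = (i << 1) + 1 and watchpoint slot i reports the negated value,
 * so breakpoint slot 1 yields 3 while watchpoint slot 0 yields -1. A compat
 * debugger can recover the slot as (abs(si_errno) - 1) >> 1, matching
 * compat_ptrace_hbp_num_to_idx() below.
 */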
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
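/*
 * Worked example of the packing above, with purely illustrative numbers:
 * a debug architecture ID of 0x6 and 6 available slots yield
 * (0x6 << 8) | 6 == 0x0606. Userspace sees this word as the first u32 of
 * the NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regset payload.
 */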
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
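/*
 * The (addr, ctrl, pad) triples marshalled above line up with the uapi
 * struct user_hwdebug_state: a u32 dbg_info plus a u32 of padding, followed
 * by per-slot { u64 addr; u32 ctrl; u32 pad } records. That layout is why
 * PTRACE_HBP_ADDR_SZ/CTRL_SZ/PAD_SZ are sized the way they are, and why
 * hw_break_set() below skips offsetof(struct user_hwdebug_state, dbg_regs)
 * bytes before consuming register data.
 */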
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	fpsimd_sync_from_effective_state(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}
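/*
 * Sketch of a (hypothetical) tracer consuming the regset above through the
 * generic ptrace regset interface; NT_PRFPREG and struct user_fpsimd_state
 * are the uapi names, everything else is illustrative:
 *
 *	struct user_fpsimd_state fpsimd;
 *	struct iovec iov = {
 *		.iov_base = &fpsimd,
 *		.iov_len = sizeof(fpsimd),
 *	};
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */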
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	fpsimd_sync_from_effective_state(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	fpsimd_sync_to_effective_state_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_fpmr())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);
}

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long fpmr;

	if (!system_supports_fpmr())
		return -EINVAL;

	fpmr = target->thread.uw.fpmr;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

	return 0;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
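/*
 * Sketch of a (hypothetical) tracer reading the variably-sized SVE regset
 * implemented below: fetch struct user_sve_header first, then re-read with
 * a buffer sized from the header. NT_ARM_SVE and the header are uapi; the
 * two-step pattern and names are illustrative:
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 *	buf = malloc(hdr.max_size);	/- a full dump fits in hdr.max_size -/
 *	iov.iov_base = buf;
 *	iov.iov_len = hdr.max_size;
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
 */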
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	if (active && target->thread.fp_type == FP_STATE_SVE)
		header->flags = SVE_PT_REGS_SVE;
	else
		header->flags = SVE_PT_REGS_FPSIMD;

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	if (active)
		header->size = SVE_PT_SIZE(vq, header->flags);
	else
		header->size = sizeof(*header);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
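/*
 * Worked example of the vl/vq conversion used throughout: vector lengths
 * are in bytes and a "vector quadword" is SVE_VQ_BYTES (16) bytes, so a
 * 256-bit VL means vl == 32 and sve_vq_from_vl(32) == 2. The reported size
 * is rounded to this granule via the ALIGN(..., SVE_VQ_BYTES) above.
 */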
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	/*
	 * When the requested vector type is not active, do not present data
	 * from the other mode to userspace.
	 */
	if (header.size == sizeof(header))
		return 0;

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		BUILD_BUG();
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;
	bool fpsimd;

	fpsimd_flush_task_state(target);

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		return ret;

	/*
	 * Streaming SVE data is always stored and presented in SVE format.
	 * Require the user to provide SVE formatted data for consistency, and
	 * to avoid the risk that we configure the task into an invalid state.
	 */
	fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD;
	if (fpsimd && type == ARM64_VEC_SME)
		return -EINVAL;

	/*
	 * On systems without SVE we accept FPSIMD format writes with
	 * a VL of 0 to allow exiting streaming mode, otherwise a VL
	 * is required.
	 */
	if (header.vl) {
		/*
		 * If the system does not support SVE we can't
		 * configure a SVE VL.
		 */
		if (!system_supports_sve() && type == ARM64_VEC_SVE)
			return -EINVAL;

		/*
		 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are
		 * consumed by vec_set_vector_length(), which will
		 * also validate them for us:
		 */
		ret = vec_set_vector_length(target, type, header.vl,
			((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
		if (ret)
			return ret;
	} else {
		/* If the system supports SVE we require a VL. */
		if (system_supports_sve())
			return -EINVAL;

		/*
		 * Only FPSIMD formatted data with no flags set is
		 * supported.
		 */
		if (header.flags != SVE_PT_REGS_FPSIMD)
			return -EINVAL;
	}

	/* Allocate SME storage if necessary, preserving any existing ZA/ZT state */
	if (type == ARM64_VEC_SME) {
		sme_alloc(target, false);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	/* Allocate SVE storage if necessary, zeroing any existing SVE state */
	if (!fpsimd) {
		sve_alloc(target, true);
		if (!target->thread.sve_state)
			return -ENOMEM;
	}

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC VL not the
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SVE);
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
	}

	/* Always zero V regs, FPSR, and FPCR */
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if (fpsimd) {
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		return ret;
	}

	/* Otherwise: no registers or full SVE case. */

	target->thread.fp_type = FP_STATE_SVE;
	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl))
		return -EIO;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve() && !system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}
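/*
 * Note on the ZA regset implemented below: when PSTATE.ZA is clear only
 * struct user_za_header is reported (header.size == ZA_PT_ZA_OFFSET); when
 * ZA is active the header is followed by VL x VL bytes of ZA data. A
 * (hypothetical) tracer can therefore size its read buffer from
 * header.max_size and check header.size to see whether any register
 * payload follows the header.
 */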
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC rather than
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}
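/*
 * ZT0 (SME2) is a single fixed-size 512-bit register, so unlike ZA the
 * regset below is a plain ZT_SIG_REG_BYTES (64 byte) blob with no header
 * and no vector-length dependency.
 */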
static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}
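/*
 * Worked example of the packing above: a key with .lo == 0x1111111111111111
 * and .hi == 0x2222222222222222 round-trips as the __uint128_t value
 * 0x22222222222222221111111111111111, which is the layout the uapi
 * user_pac_address_keys/user_pac_generic_keys fields expect.
 */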
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
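/*
 * Sketch of a (hypothetical) tracer using NT_ARM_PAC_MASK from above to
 * strip PAC bits from sampled code pointers; the regset and struct names
 * are uapi, the stripping logic is illustrative and ignores the bit-55
 * sign-selection quirk:
 *
 *	struct user_pac_mask masks;
 *	struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_MASK, &iov);
 *	plain_pc = signed_pc & ~masks.insn_mask;
 */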
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target, const struct
				user_regset *regset, unsigned int pos,
				unsigned int count, const void *kbuf, const
				void __user *ubuf)
{
	int ret;
	long ctrl;

	ctrl = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

#ifdef CONFIG_ARM64_POE
static int poe_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_poe())
		return -EINVAL;

	return membuf_write(&to, &target->thread.por_el0,
			    sizeof(target->thread.por_el0));
}

static int poe_set(struct task_struct *target, const struct
		   user_regset *regset, unsigned int pos,
		   unsigned int count, const void *kbuf, const
		   void __user *ubuf)
{
	int ret;
	long ctrl;

	if (!system_supports_poe())
		return -EINVAL;

	ctrl = target->thread.por_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	target->thread.por_el0 = ctrl;

	return 0;
}
#endif

#ifdef CONFIG_ARM64_GCS
static void task_gcs_to_user(struct user_gcs *user_gcs,
			     const struct task_struct *target)
{
	user_gcs->features_enabled = target->thread.gcs_el0_mode;
	user_gcs->features_locked = target->thread.gcs_el0_locked;
	user_gcs->gcspr_el0 = target->thread.gcspr_el0;
}

static void task_gcs_from_user(struct task_struct *target,
			       const struct user_gcs *user_gcs)
{
	target->thread.gcs_el0_mode = user_gcs->features_enabled;
	target->thread.gcs_el0_locked = user_gcs->features_locked;
	target->thread.gcspr_el0 = user_gcs->gcspr_el0;
}

static int gcs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	if (target == current)
		gcs_preserve_current_state();

	task_gcs_to_user(&user_gcs, target);

	return membuf_write(&to, &user_gcs, sizeof(user_gcs));
}

static int gcs_set(struct task_struct *target, const struct
		   user_regset *regset, unsigned int pos,
		   unsigned int count, const void *kbuf, const
		   void __user *ubuf)
{
	int ret;
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	task_gcs_to_user(&user_gcs, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
	if (ret)
		return ret;

	if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	task_gcs_from_user(target, &user_gcs);

	return 0;
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_FPMR,
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
#ifdef CONFIG_ARM64_POE
	REGSET_POE,
#endif
#ifdef CONFIG_ARM64_GCS
	REGSET_GCS,
#endif
};
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		USER_REGSET_NOTE_TYPE(PRFPREG),
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		USER_REGSET_NOTE_TYPE(ARM_TLS),
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_BREAK),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_WATCH),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL),
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		USER_REGSET_NOTE_TYPE(ARM_FPMR),
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		USER_REGSET_NOTE_TYPE(ARM_SVE),
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		USER_REGSET_NOTE_TYPE(ARM_SSVE),
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		USER_REGSET_NOTE_TYPE(ARM_ZA),
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		USER_REGSET_NOTE_TYPE(ARM_ZT),
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		USER_REGSET_NOTE_TYPE(ARM_PAC_MASK),
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		USER_REGSET_NOTE_TYPE(ARM_PAC_ENABLED_KEYS),
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		USER_REGSET_NOTE_TYPE(ARM_PACA_KEYS),
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		USER_REGSET_NOTE_TYPE(ARM_PACG_KEYS),
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		USER_REGSET_NOTE_TYPE(ARM_TAGGED_ADDR_CTRL),
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
#ifdef CONFIG_ARM64_POE
	[REGSET_POE] = {
		USER_REGSET_NOTE_TYPE(ARM_POE),
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = poe_get,
		.set = poe_set,
	},
#endif
#ifdef CONFIG_ARM64_GCS
	[REGSET_GCS] = {
		USER_REGSET_NOTE_TYPE(ARM_GCS),
		.n = sizeof(struct user_gcs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gcs_get,
		.set = gcs_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}
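/*
 * Worked mapping for the virtual AArch32 register file handled above and
 * below (derived from compat_get_user_reg()): indices 0-14 are r0-r14,
 * 15 is the PC, 16 is the CPSR (converted to/from pstate) and 17 is
 * ORIG_r0, mirroring the COMPAT_ELF_NGREG-entry compat_elf_gregset_t.
 */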
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		USER_REGSET_NOTE_TYPE(ARM_VFP),
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		USER_REGSET_NOTE_TYPE(ARM_VFP),
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		USER_REGSET_NOTE_TYPE(ARM_TLS),
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_BREAK),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		USER_REGSET_NOTE_TYPE(ARM_HW_WATCH),
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		USER_REGSET_NOTE_TYPE(ARM_SYSTEM_CALL),
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
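/*
 * Worked example for the compat PTRACE_PEEKUSR handling above: offsets are
 * byte offsets into the virtual AArch32 register file, so off == 60 reads
 * register 60 >> 2 == 15 (the PC) and off == 64 reads the CPSR; offsets
 * past the gregset but below COMPAT_USER_SZ simply read as zero.
 */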
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
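/*
 * Worked example of the virtual register numbering above: num == 1 is
 * breakpoint 0's address and num == 2 its control word, num == 3/4 map to
 * breakpoint 1, while num == -1/-2 address watchpoint 0; num == 0 returns
 * the resource information word. Odd numbers select addresses, even
 * numbers select control words, as tested by (num & 1).
 */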
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
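/*
 * Illustrative sketch (not part of this file): reading MTE allocation tags
 * from a tracee via the PTRACE_PEEKMTETAGS request routed above. Each byte
 * of the buffer receives one tag, covering one 16-byte granule; see
 * Documentation/arch/arm64/memory-tagging-extension.rst for the full ABI.
 * "pid" and "addr" are assumptions: a stopped tracee and a tagged address
 * within one of its MTE-enabled mappings.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	unsigned char tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *
 *	// On success, iov.iov_len is updated with the number of tags read.
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov);
 */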
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
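/*
 * Tracer-side view of the ABI implemented by report_syscall() (sketch, not
 * part of this file): at a PTRACE_SYSCALL stop on a native tracee, x7
 * distinguishes entry (0) from exit (1) stops, and any value written to it
 * during the stop is discarded when the tracee resumes. "pid" is assumed
 * to be a tracee stopped in syscall-stop.
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <asm/ptrace.h>
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *	// regs.regs[7] is 0 at syscall-entry stops, 1 at syscall-exit
 *	// stops; the tracee's real x7 is restored before it resumes.
 */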
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}
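/*
 * Note on the return convention: 1 means the registers were already
 * acceptable for EL0; 0 means they had to be forced to a safe EL0t state.
 * Callers treat the latter as a rejection, as in compat_ptrace_write_user()
 * above:
 *
 *	if (!valid_user_regs(&newregs.user_regs, tsk))
 *		return -EINVAL;
 */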