// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/gcs.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
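/*
 * Illustrative use (a sketch, not part of the original file): kprobe-style
 * argument fetching can resolve a register by name and then read it with
 * regs_get_register() from <asm/ptrace.h>:
 *
 *	int offset = regs_query_register_offset("x2");
 *
 *	if (offset >= 0)
 *		val = regs_get_register(regs, offset);
 */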
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
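/*
 * Example of the si_errno encoding used above: a hit on breakpoint slot 0
 * is reported with si_errno 1, slot 1 with 3, and so on, while watchpoint
 * slots report the negated values (slot 0 -> -1). This matches the
 * (2 * idx + 1) virtual register numbering used by the compat
 * PTRACE_GETHBPREGS/PTRACE_SETHBPREGS handling further down.
 */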
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}
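/*
 * Note on the flow below: breakpoints are created lazily and start out
 * disabled with placeholder attributes, since the debugger typically writes
 * the address first and only then an enabling control value; every
 * intermediate state must still pass the perf layer's validation.
 */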
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
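/*
 * Userspace view (an illustrative sketch): the NT_ARM_HW_BREAK and
 * NT_ARM_HW_WATCH payload produced above is a struct user_hwdebug_state,
 * i.e. a u32 dbg_info, a u32 pad, then a { u64 addr; u32 ctrl; u32 pad; }
 * entry per slot:
 *
 *	struct user_hwdebug_state hwd;
 *	struct iovec iov = { .iov_base = &hwd, .iov_len = sizeof(hwd) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 */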
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}
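/*
 * Userspace counterpart (an illustrative sketch): NT_PRFPREG transfers a
 * struct user_fpsimd_state, i.e. the 32 128-bit V-registers followed by
 * fpsr and fpcr:
 *
 *	struct user_fpsimd_state fp;
 *	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */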
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}

static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_fpmr())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);
}

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long fpmr;

	if (!system_supports_fpmr())
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

	return 0;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE
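/*
 * Record layout implemented below: an NT_ARM_SVE (or NT_ARM_SSVE) dump
 * starts with a struct user_sve_header carrying the vector length and
 * flags, followed either by FPSIMD-format data (SVE_PT_REGS_FPSIMD) or by
 * full SVE-format data (SVE_PT_REGS_SVE): the Z-registers, P-registers and
 * FFR, then fpsr/fpcr, all sized by the vector length in the header.
 */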
static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (target->thread.fp_type == FP_STATE_FPSIMD) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC VL not the
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;

			/*
			 * Disable traps and ensure there is SME storage but
			 * preserve any currently set values in ZA/ZT.
			 */
			sme_alloc(target, false);
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Only enable SVE if we are
	 * configuring normal SVE, a system with streaming SVE may not
	 * have normal SVE.
	 */
	fpsimd_sync_to_sve(target);
	if (type == ARM64_VEC_SVE)
		set_tsk_thread_flag(target, TIF_SVE);
	target->thread.fp_type = FP_STATE_SVE;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}
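/*
 * Layout implemented below: the NT_ARM_ZA payload is a struct
 * user_za_header on its own while PSTATE.ZA is clear, or the header
 * followed by the ZA array (VL x VL bytes at the current streaming vector
 * length) while PSTATE.ZA is set.
 */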
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/*
	 * Actual VL set may be different from what the user asked
	 * for, or we may have configured the _ONEXEC rather than
	 * current VL:
	 */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}
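/*
 * ZT0 (SME2) is a fixed-size 512-bit register, so unlike ZA the NT_ARM_ZT
 * regset below is a plain ZT_SIG_REG_BYTES blob with no header; writing it
 * also enables PSTATE.ZA as a side effect.
 */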
static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	if (!system_supports_sme2())
		return -EINVAL;

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
			     ZT_SIG_REG_BYTES);
	else
		membuf_zero(&to, ZT_SIG_REG_BYTES);

	return 0;
}

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}
static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
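/*
 * The tagged address ABI control exposed below is the same per-task state
 * operated on by the PR_SET_TAGGED_ADDR_CTRL/PR_GET_TAGGED_ADDR_CTRL
 * prctls; the regset simply lets a debugger or checkpoint/restore tool
 * access it on behalf of the tracee.
 */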
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target, const struct
				user_regset *regset, unsigned int pos,
				unsigned int count, const void *kbuf, const
				void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

#ifdef CONFIG_ARM64_POE
static int poe_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_poe())
		return -EINVAL;

	return membuf_write(&to, &target->thread.por_el0,
			    sizeof(target->thread.por_el0));
}

static int poe_set(struct task_struct *target, const struct
		   user_regset *regset, unsigned int pos,
		   unsigned int count, const void *kbuf, const
		   void __user *ubuf)
{
	int ret;
	long ctrl;

	if (!system_supports_poe())
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	target->thread.por_el0 = ctrl;

	return 0;
}
#endif

#ifdef CONFIG_ARM64_GCS
static int gcs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	if (target == current)
		gcs_preserve_current_state();

	user_gcs.features_enabled = target->thread.gcs_el0_mode;
	user_gcs.features_locked = target->thread.gcs_el0_locked;
	user_gcs.gcspr_el0 = target->thread.gcspr_el0;

	return membuf_write(&to, &user_gcs, sizeof(user_gcs));
}

static int gcs_set(struct task_struct *target, const struct
		   user_regset *regset, unsigned int pos,
		   unsigned int count, const void *kbuf, const
		   void __user *ubuf)
{
	int ret;
	struct user_gcs user_gcs;

	if (!system_supports_gcs())
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
	if (ret)
		return ret;

	if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	target->thread.gcs_el0_mode = user_gcs.features_enabled;
	target->thread.gcs_el0_locked = user_gcs.features_locked;
	target->thread.gcspr_el0 = user_gcs.gcspr_el0;

	return 0;
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_FPMR,
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
#ifdef CONFIG_ARM64_POE
	REGSET_POE,
#endif
#ifdef CONFIG_ARM64_GCS
	REGSET_GCS,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		.core_note_type = NT_ARM_FPMR,
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
#ifdef CONFIG_ARM64_POE
	[REGSET_POE] = {
		.core_note_type = NT_ARM_POE,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = poe_get,
		.set = poe_set,
	},
#endif
#ifdef CONFIG_ARM64_GCS
	[REGSET_GCS] = {
		.core_note_type = NT_ARM_GCS,
		.n = sizeof(struct user_gcs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gcs_get,
		.set = gcs_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
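/*
 * AArch32 "virtual" register numbering used below: indices 0-14 map
 * straight onto regs[0..14] (r0-r14), 15 is the pc, 16 is the cpsr
 * (converted to and from the native pstate) and 17 is orig_r0 (orig_x0).
 */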
static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
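/*
 * Worked example of the numbering above: num 1 and 2 are breakpoint 0's
 * address and control registers and num 3 and 4 are breakpoint 1's, while
 * num -1 and -2 are watchpoint 0's; odd numbers select the address
 * register, even numbers the control register, and (abs(num) - 1) >> 1
 * recovers the slot index.
 */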
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
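
/*
 * Illustrative sketch only (tracer-side pseudo-usage, not kernel code):
 * the MTE requests transfer allocation tags, one tag per byte of a
 * buffer described by an iovec in the tracer, e.g.
 *
 *	char tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, remote_addr, &iov);
 *
 * with iov.iov_len updated on return to the number of tags transferred.
 * See the arm64 memory tagging documentation for the full ABI.
 */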

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
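
/*
 * Illustrative sketch only (tracer-side pseudo-usage, not kernel code):
 * during a syscall stop, a tracer of a 64-bit task can tell entry from
 * exit by reading back the clobbered register, e.g.
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	entry = (uregs.regs[7] == PTRACE_SYSCALL_ENTER);
 *
 * For an AArch32 tracee the same information is in r12 (ip).
 */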

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}
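
/*
 * Illustrative sketch only (tracer-side pseudo-usage, not kernel code):
 * the checks above are what stop a tracer from smuggling privileged
 * PSTATE bits into a stopped task. A (hypothetical) 64-bit tracer doing
 *
 *	uregs.pstate |= PSR_MODE_EL1h;
 *	ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * sees the write rejected with -EINVAL: user_mode() is false for the
 * resulting PSTATE, so valid_native_regs() refuses it.
 */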