1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Based on arch/arm/kernel/ptrace.c 4 * 5 * By Ross Biro 1/23/92 6 * edited by Linus Torvalds 7 * ARM modifications Copyright (C) 2000 Russell King 8 * Copyright (C) 2012 ARM Ltd. 9 */ 10 11 #include <linux/audit.h> 12 #include <linux/compat.h> 13 #include <linux/kernel.h> 14 #include <linux/sched/signal.h> 15 #include <linux/sched/task_stack.h> 16 #include <linux/mm.h> 17 #include <linux/nospec.h> 18 #include <linux/smp.h> 19 #include <linux/ptrace.h> 20 #include <linux/user.h> 21 #include <linux/seccomp.h> 22 #include <linux/security.h> 23 #include <linux/init.h> 24 #include <linux/signal.h> 25 #include <linux/string.h> 26 #include <linux/uaccess.h> 27 #include <linux/perf_event.h> 28 #include <linux/hw_breakpoint.h> 29 #include <linux/regset.h> 30 #include <linux/elf.h> 31 #include <linux/rseq.h> 32 33 #include <asm/compat.h> 34 #include <asm/cpufeature.h> 35 #include <asm/debug-monitors.h> 36 #include <asm/fpsimd.h> 37 #include <asm/gcs.h> 38 #include <asm/mte.h> 39 #include <asm/pointer_auth.h> 40 #include <asm/stacktrace.h> 41 #include <asm/syscall.h> 42 #include <asm/traps.h> 43 #include <asm/system_misc.h> 44 45 #define CREATE_TRACE_POINTS 46 #include <trace/events/syscalls.h> 47 48 struct pt_regs_offset { 49 const char *name; 50 int offset; 51 }; 52 53 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} 54 #define REG_OFFSET_END {.name = NULL, .offset = 0} 55 #define GPR_OFFSET_NAME(r) \ 56 {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} 57 58 static const struct pt_regs_offset regoffset_table[] = { 59 GPR_OFFSET_NAME(0), 60 GPR_OFFSET_NAME(1), 61 GPR_OFFSET_NAME(2), 62 GPR_OFFSET_NAME(3), 63 GPR_OFFSET_NAME(4), 64 GPR_OFFSET_NAME(5), 65 GPR_OFFSET_NAME(6), 66 GPR_OFFSET_NAME(7), 67 GPR_OFFSET_NAME(8), 68 GPR_OFFSET_NAME(9), 69 GPR_OFFSET_NAME(10), 70 GPR_OFFSET_NAME(11), 71 GPR_OFFSET_NAME(12), 72 GPR_OFFSET_NAME(13), 73 GPR_OFFSET_NAME(14), 74 GPR_OFFSET_NAME(15), 75 GPR_OFFSET_NAME(16), 76 GPR_OFFSET_NAME(17), 77 GPR_OFFSET_NAME(18), 78 GPR_OFFSET_NAME(19), 79 GPR_OFFSET_NAME(20), 80 GPR_OFFSET_NAME(21), 81 GPR_OFFSET_NAME(22), 82 GPR_OFFSET_NAME(23), 83 GPR_OFFSET_NAME(24), 84 GPR_OFFSET_NAME(25), 85 GPR_OFFSET_NAME(26), 86 GPR_OFFSET_NAME(27), 87 GPR_OFFSET_NAME(28), 88 GPR_OFFSET_NAME(29), 89 GPR_OFFSET_NAME(30), 90 {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, 91 REG_OFFSET_NAME(sp), 92 REG_OFFSET_NAME(pc), 93 REG_OFFSET_NAME(pstate), 94 REG_OFFSET_END, 95 }; 96 97 /** 98 * regs_query_register_offset() - query register offset from its name 99 * @name: the name of a register 100 * 101 * regs_query_register_offset() returns the offset of a register in struct 102 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 103 */ 104 int regs_query_register_offset(const char *name) 105 { 106 const struct pt_regs_offset *roff; 107 108 for (roff = regoffset_table; roff->name != NULL; roff++) 109 if (!strcmp(roff->name, name)) 110 return roff->offset; 111 return -EINVAL; 112 } 113 114 /** 115 * regs_within_kernel_stack() - check the address in the stack 116 * @regs: pt_regs which contains kernel stack pointer. 117 * @addr: address which is checked. 118 * 119 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 120 * If @addr is within the kernel stack, it returns true. If not, returns false. 121 */ 122 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 123 { 124 return ((addr & ~(THREAD_SIZE - 1)) == 125 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || 126 on_irq_stack(addr, sizeof(unsigned long)); 127 } 128 129 /** 130 * regs_get_kernel_stack_nth() - get Nth entry of the stack 131 * @regs: pt_regs which contains kernel stack pointer. 132 * @n: stack entry number. 133 * 134 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 135 * is specified by @regs. If the @n th entry is NOT in the kernel stack, 136 * this returns 0. 137 */ 138 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 139 { 140 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); 141 142 addr += n; 143 if (regs_within_kernel_stack(regs, (unsigned long)addr)) 144 return *addr; 145 else 146 return 0; 147 } 148 149 /* 150 * TODO: does not yet catch signals sent when the child dies. 151 * in exit.c or in signal.c. 152 */ 153 154 /* 155 * Called by kernel/ptrace.c when detaching.. 156 */ 157 void ptrace_disable(struct task_struct *child) 158 { 159 /* 160 * This would be better off in core code, but PTRACE_DETACH has 161 * grown its fair share of arch-specific worts and changing it 162 * is likely to cause regressions on obscure architectures. 163 */ 164 user_disable_single_step(child); 165 } 166 167 #ifdef CONFIG_HAVE_HW_BREAKPOINT 168 /* 169 * Handle hitting a HW-breakpoint. 170 */ 171 static void ptrace_hbptriggered(struct perf_event *bp, 172 struct perf_sample_data *data, 173 struct pt_regs *regs) 174 { 175 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); 176 const char *desc = "Hardware breakpoint trap (ptrace)"; 177 178 if (is_compat_task()) { 179 int si_errno = 0; 180 int i; 181 182 for (i = 0; i < ARM_MAX_BRP; ++i) { 183 if (current->thread.debug.hbp_break[i] == bp) { 184 si_errno = (i << 1) + 1; 185 break; 186 } 187 } 188 189 for (i = 0; i < ARM_MAX_WRP; ++i) { 190 if (current->thread.debug.hbp_watch[i] == bp) { 191 si_errno = -((i << 1) + 1); 192 break; 193 } 194 } 195 arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, 196 desc); 197 return; 198 } 199 200 arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); 201 } 202 203 /* 204 * Unregister breakpoints from this task and reset the pointers in 205 * the thread_struct. 206 */ 207 void flush_ptrace_hw_breakpoint(struct task_struct *tsk) 208 { 209 int i; 210 struct thread_struct *t = &tsk->thread; 211 212 for (i = 0; i < ARM_MAX_BRP; i++) { 213 if (t->debug.hbp_break[i]) { 214 unregister_hw_breakpoint(t->debug.hbp_break[i]); 215 t->debug.hbp_break[i] = NULL; 216 } 217 } 218 219 for (i = 0; i < ARM_MAX_WRP; i++) { 220 if (t->debug.hbp_watch[i]) { 221 unregister_hw_breakpoint(t->debug.hbp_watch[i]); 222 t->debug.hbp_watch[i] = NULL; 223 } 224 } 225 } 226 227 void ptrace_hw_copy_thread(struct task_struct *tsk) 228 { 229 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); 230 } 231 232 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, 233 struct task_struct *tsk, 234 unsigned long idx) 235 { 236 struct perf_event *bp = ERR_PTR(-EINVAL); 237 238 switch (note_type) { 239 case NT_ARM_HW_BREAK: 240 if (idx >= ARM_MAX_BRP) 241 goto out; 242 idx = array_index_nospec(idx, ARM_MAX_BRP); 243 bp = tsk->thread.debug.hbp_break[idx]; 244 break; 245 case NT_ARM_HW_WATCH: 246 if (idx >= ARM_MAX_WRP) 247 goto out; 248 idx = array_index_nospec(idx, ARM_MAX_WRP); 249 bp = tsk->thread.debug.hbp_watch[idx]; 250 break; 251 } 252 253 out: 254 return bp; 255 } 256 257 static int ptrace_hbp_set_event(unsigned int note_type, 258 struct task_struct *tsk, 259 unsigned long idx, 260 struct perf_event *bp) 261 { 262 int err = -EINVAL; 263 264 switch (note_type) { 265 case NT_ARM_HW_BREAK: 266 if (idx >= ARM_MAX_BRP) 267 goto out; 268 idx = array_index_nospec(idx, ARM_MAX_BRP); 269 tsk->thread.debug.hbp_break[idx] = bp; 270 err = 0; 271 break; 272 case NT_ARM_HW_WATCH: 273 if (idx >= ARM_MAX_WRP) 274 goto out; 275 idx = array_index_nospec(idx, ARM_MAX_WRP); 276 tsk->thread.debug.hbp_watch[idx] = bp; 277 err = 0; 278 break; 279 } 280 281 out: 282 return err; 283 } 284 285 static struct perf_event *ptrace_hbp_create(unsigned int note_type, 286 struct task_struct *tsk, 287 unsigned long idx) 288 { 289 struct perf_event *bp; 290 struct perf_event_attr attr; 291 int err, type; 292 293 switch (note_type) { 294 case NT_ARM_HW_BREAK: 295 type = HW_BREAKPOINT_X; 296 break; 297 case NT_ARM_HW_WATCH: 298 type = HW_BREAKPOINT_RW; 299 break; 300 default: 301 return ERR_PTR(-EINVAL); 302 } 303 304 ptrace_breakpoint_init(&attr); 305 306 /* 307 * Initialise fields to sane defaults 308 * (i.e. values that will pass validation). 309 */ 310 attr.bp_addr = 0; 311 attr.bp_len = HW_BREAKPOINT_LEN_4; 312 attr.bp_type = type; 313 attr.disabled = 1; 314 315 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); 316 if (IS_ERR(bp)) 317 return bp; 318 319 err = ptrace_hbp_set_event(note_type, tsk, idx, bp); 320 if (err) 321 return ERR_PTR(err); 322 323 return bp; 324 } 325 326 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, 327 struct arch_hw_breakpoint_ctrl ctrl, 328 struct perf_event_attr *attr) 329 { 330 int err, len, type, offset, disabled = !ctrl.enabled; 331 332 attr->disabled = disabled; 333 if (disabled) 334 return 0; 335 336 err = arch_bp_generic_fields(ctrl, &len, &type, &offset); 337 if (err) 338 return err; 339 340 switch (note_type) { 341 case NT_ARM_HW_BREAK: 342 if ((type & HW_BREAKPOINT_X) != type) 343 return -EINVAL; 344 break; 345 case NT_ARM_HW_WATCH: 346 if ((type & HW_BREAKPOINT_RW) != type) 347 return -EINVAL; 348 break; 349 default: 350 return -EINVAL; 351 } 352 353 attr->bp_len = len; 354 attr->bp_type = type; 355 attr->bp_addr += offset; 356 357 return 0; 358 } 359 360 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) 361 { 362 u8 num; 363 u32 reg = 0; 364 365 switch (note_type) { 366 case NT_ARM_HW_BREAK: 367 num = hw_breakpoint_slots(TYPE_INST); 368 break; 369 case NT_ARM_HW_WATCH: 370 num = hw_breakpoint_slots(TYPE_DATA); 371 break; 372 default: 373 return -EINVAL; 374 } 375 376 reg |= debug_monitors_arch(); 377 reg <<= 8; 378 reg |= num; 379 380 *info = reg; 381 return 0; 382 } 383 384 static int ptrace_hbp_get_ctrl(unsigned int note_type, 385 struct task_struct *tsk, 386 unsigned long idx, 387 u32 *ctrl) 388 { 389 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 390 391 if (IS_ERR(bp)) 392 return PTR_ERR(bp); 393 394 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; 395 return 0; 396 } 397 398 static int ptrace_hbp_get_addr(unsigned int note_type, 399 struct task_struct *tsk, 400 unsigned long idx, 401 u64 *addr) 402 { 403 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 404 405 if (IS_ERR(bp)) 406 return PTR_ERR(bp); 407 408 *addr = bp ? counter_arch_bp(bp)->address : 0; 409 return 0; 410 } 411 412 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, 413 struct task_struct *tsk, 414 unsigned long idx) 415 { 416 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); 417 418 if (!bp) 419 bp = ptrace_hbp_create(note_type, tsk, idx); 420 421 return bp; 422 } 423 424 static int ptrace_hbp_set_ctrl(unsigned int note_type, 425 struct task_struct *tsk, 426 unsigned long idx, 427 u32 uctrl) 428 { 429 int err; 430 struct perf_event *bp; 431 struct perf_event_attr attr; 432 struct arch_hw_breakpoint_ctrl ctrl; 433 434 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 435 if (IS_ERR(bp)) { 436 err = PTR_ERR(bp); 437 return err; 438 } 439 440 attr = bp->attr; 441 decode_ctrl_reg(uctrl, &ctrl); 442 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); 443 if (err) 444 return err; 445 446 return modify_user_hw_breakpoint(bp, &attr); 447 } 448 449 static int ptrace_hbp_set_addr(unsigned int note_type, 450 struct task_struct *tsk, 451 unsigned long idx, 452 u64 addr) 453 { 454 int err; 455 struct perf_event *bp; 456 struct perf_event_attr attr; 457 458 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); 459 if (IS_ERR(bp)) { 460 err = PTR_ERR(bp); 461 return err; 462 } 463 464 attr = bp->attr; 465 attr.bp_addr = addr; 466 err = modify_user_hw_breakpoint(bp, &attr); 467 return err; 468 } 469 470 #define PTRACE_HBP_ADDR_SZ sizeof(u64) 471 #define PTRACE_HBP_CTRL_SZ sizeof(u32) 472 #define PTRACE_HBP_PAD_SZ sizeof(u32) 473 474 static int hw_break_get(struct task_struct *target, 475 const struct user_regset *regset, 476 struct membuf to) 477 { 478 unsigned int note_type = regset->core_note_type; 479 int ret, idx = 0; 480 u32 info, ctrl; 481 u64 addr; 482 483 /* Resource info */ 484 ret = ptrace_hbp_get_resource_info(note_type, &info); 485 if (ret) 486 return ret; 487 488 membuf_write(&to, &info, sizeof(info)); 489 membuf_zero(&to, sizeof(u32)); 490 /* (address, ctrl) registers */ 491 while (to.left) { 492 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); 493 if (ret) 494 return ret; 495 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); 496 if (ret) 497 return ret; 498 membuf_store(&to, addr); 499 membuf_store(&to, ctrl); 500 membuf_zero(&to, sizeof(u32)); 501 idx++; 502 } 503 return 0; 504 } 505 506 static int hw_break_set(struct task_struct *target, 507 const struct user_regset *regset, 508 unsigned int pos, unsigned int count, 509 const void *kbuf, const void __user *ubuf) 510 { 511 unsigned int note_type = regset->core_note_type; 512 int ret, idx = 0, offset, limit; 513 u32 ctrl; 514 u64 addr; 515 516 /* Resource info and pad */ 517 offset = offsetof(struct user_hwdebug_state, dbg_regs); 518 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset); 519 520 /* (address, ctrl) registers */ 521 limit = regset->n * regset->size; 522 while (count && offset < limit) { 523 if (count < PTRACE_HBP_ADDR_SZ) 524 return -EINVAL; 525 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 526 offset, offset + PTRACE_HBP_ADDR_SZ); 527 if (ret) 528 return ret; 529 ret = ptrace_hbp_set_addr(note_type, target, idx, addr); 530 if (ret) 531 return ret; 532 offset += PTRACE_HBP_ADDR_SZ; 533 534 if (!count) 535 break; 536 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 537 offset, offset + PTRACE_HBP_CTRL_SZ); 538 if (ret) 539 return ret; 540 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); 541 if (ret) 542 return ret; 543 offset += PTRACE_HBP_CTRL_SZ; 544 545 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 546 offset, offset + PTRACE_HBP_PAD_SZ); 547 offset += PTRACE_HBP_PAD_SZ; 548 idx++; 549 } 550 551 return 0; 552 } 553 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 554 555 static int gpr_get(struct task_struct *target, 556 const struct user_regset *regset, 557 struct membuf to) 558 { 559 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; 560 return membuf_write(&to, uregs, sizeof(*uregs)); 561 } 562 563 static int gpr_set(struct task_struct *target, const struct user_regset *regset, 564 unsigned int pos, unsigned int count, 565 const void *kbuf, const void __user *ubuf) 566 { 567 int ret; 568 struct user_pt_regs newregs = task_pt_regs(target)->user_regs; 569 570 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 571 if (ret) 572 return ret; 573 574 if (!valid_user_regs(&newregs, target)) 575 return -EINVAL; 576 577 task_pt_regs(target)->user_regs = newregs; 578 return 0; 579 } 580 581 static int fpr_active(struct task_struct *target, const struct user_regset *regset) 582 { 583 if (!system_supports_fpsimd()) 584 return -ENODEV; 585 return regset->n; 586 } 587 588 /* 589 * TODO: update fp accessors for lazy context switching (sync/flush hwstate) 590 */ 591 static int __fpr_get(struct task_struct *target, 592 const struct user_regset *regset, 593 struct membuf to) 594 { 595 struct user_fpsimd_state *uregs; 596 597 sve_sync_to_fpsimd(target); 598 599 uregs = &target->thread.uw.fpsimd_state; 600 601 return membuf_write(&to, uregs, sizeof(*uregs)); 602 } 603 604 static int fpr_get(struct task_struct *target, const struct user_regset *regset, 605 struct membuf to) 606 { 607 if (!system_supports_fpsimd()) 608 return -EINVAL; 609 610 if (target == current) 611 fpsimd_preserve_current_state(); 612 613 return __fpr_get(target, regset, to); 614 } 615 616 static int __fpr_set(struct task_struct *target, 617 const struct user_regset *regset, 618 unsigned int pos, unsigned int count, 619 const void *kbuf, const void __user *ubuf, 620 unsigned int start_pos) 621 { 622 int ret; 623 struct user_fpsimd_state newstate; 624 625 /* 626 * Ensure target->thread.uw.fpsimd_state is up to date, so that a 627 * short copyin can't resurrect stale data. 628 */ 629 sve_sync_to_fpsimd(target); 630 631 newstate = target->thread.uw.fpsimd_state; 632 633 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 634 start_pos, start_pos + sizeof(newstate)); 635 if (ret) 636 return ret; 637 638 target->thread.uw.fpsimd_state = newstate; 639 640 return ret; 641 } 642 643 static int fpr_set(struct task_struct *target, const struct user_regset *regset, 644 unsigned int pos, unsigned int count, 645 const void *kbuf, const void __user *ubuf) 646 { 647 int ret; 648 649 if (!system_supports_fpsimd()) 650 return -EINVAL; 651 652 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); 653 if (ret) 654 return ret; 655 656 sve_sync_from_fpsimd_zeropad(target); 657 fpsimd_flush_task_state(target); 658 659 return ret; 660 } 661 662 static int tls_get(struct task_struct *target, const struct user_regset *regset, 663 struct membuf to) 664 { 665 int ret; 666 667 if (target == current) 668 tls_preserve_current_state(); 669 670 ret = membuf_store(&to, target->thread.uw.tp_value); 671 if (system_supports_tpidr2()) 672 ret = membuf_store(&to, target->thread.tpidr2_el0); 673 else 674 ret = membuf_zero(&to, sizeof(u64)); 675 676 return ret; 677 } 678 679 static int tls_set(struct task_struct *target, const struct user_regset *regset, 680 unsigned int pos, unsigned int count, 681 const void *kbuf, const void __user *ubuf) 682 { 683 int ret; 684 unsigned long tls[2]; 685 686 tls[0] = target->thread.uw.tp_value; 687 if (system_supports_tpidr2()) 688 tls[1] = target->thread.tpidr2_el0; 689 690 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count); 691 if (ret) 692 return ret; 693 694 target->thread.uw.tp_value = tls[0]; 695 if (system_supports_tpidr2()) 696 target->thread.tpidr2_el0 = tls[1]; 697 698 return ret; 699 } 700 701 static int fpmr_get(struct task_struct *target, const struct user_regset *regset, 702 struct membuf to) 703 { 704 if (!system_supports_fpmr()) 705 return -EINVAL; 706 707 if (target == current) 708 fpsimd_preserve_current_state(); 709 710 return membuf_store(&to, target->thread.uw.fpmr); 711 } 712 713 static int fpmr_set(struct task_struct *target, const struct user_regset *regset, 714 unsigned int pos, unsigned int count, 715 const void *kbuf, const void __user *ubuf) 716 { 717 int ret; 718 unsigned long fpmr; 719 720 if (!system_supports_fpmr()) 721 return -EINVAL; 722 723 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count); 724 if (ret) 725 return ret; 726 727 target->thread.uw.fpmr = fpmr; 728 729 fpsimd_flush_task_state(target); 730 731 return 0; 732 } 733 734 static int system_call_get(struct task_struct *target, 735 const struct user_regset *regset, 736 struct membuf to) 737 { 738 return membuf_store(&to, task_pt_regs(target)->syscallno); 739 } 740 741 static int system_call_set(struct task_struct *target, 742 const struct user_regset *regset, 743 unsigned int pos, unsigned int count, 744 const void *kbuf, const void __user *ubuf) 745 { 746 int syscallno = task_pt_regs(target)->syscallno; 747 int ret; 748 749 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 750 if (ret) 751 return ret; 752 753 task_pt_regs(target)->syscallno = syscallno; 754 return ret; 755 } 756 757 #ifdef CONFIG_ARM64_SVE 758 759 static void sve_init_header_from_task(struct user_sve_header *header, 760 struct task_struct *target, 761 enum vec_type type) 762 { 763 unsigned int vq; 764 bool active; 765 enum vec_type task_type; 766 767 memset(header, 0, sizeof(*header)); 768 769 /* Check if the requested registers are active for the task */ 770 if (thread_sm_enabled(&target->thread)) 771 task_type = ARM64_VEC_SME; 772 else 773 task_type = ARM64_VEC_SVE; 774 active = (task_type == type); 775 776 switch (type) { 777 case ARM64_VEC_SVE: 778 if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) 779 header->flags |= SVE_PT_VL_INHERIT; 780 break; 781 case ARM64_VEC_SME: 782 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 783 header->flags |= SVE_PT_VL_INHERIT; 784 break; 785 default: 786 WARN_ON_ONCE(1); 787 return; 788 } 789 790 if (active) { 791 if (target->thread.fp_type == FP_STATE_FPSIMD) { 792 header->flags |= SVE_PT_REGS_FPSIMD; 793 } else { 794 header->flags |= SVE_PT_REGS_SVE; 795 } 796 } 797 798 header->vl = task_get_vl(target, type); 799 vq = sve_vq_from_vl(header->vl); 800 801 header->max_vl = vec_max_vl(type); 802 header->size = SVE_PT_SIZE(vq, header->flags); 803 header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), 804 SVE_PT_REGS_SVE); 805 } 806 807 static unsigned int sve_size_from_header(struct user_sve_header const *header) 808 { 809 return ALIGN(header->size, SVE_VQ_BYTES); 810 } 811 812 static int sve_get_common(struct task_struct *target, 813 const struct user_regset *regset, 814 struct membuf to, 815 enum vec_type type) 816 { 817 struct user_sve_header header; 818 unsigned int vq; 819 unsigned long start, end; 820 821 /* Header */ 822 sve_init_header_from_task(&header, target, type); 823 vq = sve_vq_from_vl(header.vl); 824 825 membuf_write(&to, &header, sizeof(header)); 826 827 if (target == current) 828 fpsimd_preserve_current_state(); 829 830 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 831 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 832 833 switch ((header.flags & SVE_PT_REGS_MASK)) { 834 case SVE_PT_REGS_FPSIMD: 835 return __fpr_get(target, regset, to); 836 837 case SVE_PT_REGS_SVE: 838 start = SVE_PT_SVE_OFFSET; 839 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 840 membuf_write(&to, target->thread.sve_state, end - start); 841 842 start = end; 843 end = SVE_PT_SVE_FPSR_OFFSET(vq); 844 membuf_zero(&to, end - start); 845 846 /* 847 * Copy fpsr, and fpcr which must follow contiguously in 848 * struct fpsimd_state: 849 */ 850 start = end; 851 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 852 membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, 853 end - start); 854 855 start = end; 856 end = sve_size_from_header(&header); 857 return membuf_zero(&to, end - start); 858 859 default: 860 return 0; 861 } 862 } 863 864 static int sve_get(struct task_struct *target, 865 const struct user_regset *regset, 866 struct membuf to) 867 { 868 if (!system_supports_sve()) 869 return -EINVAL; 870 871 return sve_get_common(target, regset, to, ARM64_VEC_SVE); 872 } 873 874 static int sve_set_common(struct task_struct *target, 875 const struct user_regset *regset, 876 unsigned int pos, unsigned int count, 877 const void *kbuf, const void __user *ubuf, 878 enum vec_type type) 879 { 880 int ret; 881 struct user_sve_header header; 882 unsigned int vq; 883 unsigned long start, end; 884 885 /* Header */ 886 if (count < sizeof(header)) 887 return -EINVAL; 888 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 889 0, sizeof(header)); 890 if (ret) 891 goto out; 892 893 /* 894 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by 895 * vec_set_vector_length(), which will also validate them for us: 896 */ 897 ret = vec_set_vector_length(target, type, header.vl, 898 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); 899 if (ret) 900 goto out; 901 902 /* Actual VL set may be less than the user asked for: */ 903 vq = sve_vq_from_vl(task_get_vl(target, type)); 904 905 /* Enter/exit streaming mode */ 906 if (system_supports_sme()) { 907 u64 old_svcr = target->thread.svcr; 908 909 switch (type) { 910 case ARM64_VEC_SVE: 911 target->thread.svcr &= ~SVCR_SM_MASK; 912 break; 913 case ARM64_VEC_SME: 914 target->thread.svcr |= SVCR_SM_MASK; 915 916 /* 917 * Disable traps and ensure there is SME storage but 918 * preserve any currently set values in ZA/ZT. 919 */ 920 sme_alloc(target, false); 921 set_tsk_thread_flag(target, TIF_SME); 922 break; 923 default: 924 WARN_ON_ONCE(1); 925 ret = -EINVAL; 926 goto out; 927 } 928 929 /* 930 * If we switched then invalidate any existing SVE 931 * state and ensure there's storage. 932 */ 933 if (target->thread.svcr != old_svcr) 934 sve_alloc(target, true); 935 } 936 937 /* Registers: FPSIMD-only case */ 938 939 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 940 if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { 941 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 942 SVE_PT_FPSIMD_OFFSET); 943 clear_tsk_thread_flag(target, TIF_SVE); 944 target->thread.fp_type = FP_STATE_FPSIMD; 945 goto out; 946 } 947 948 /* 949 * Otherwise: no registers or full SVE case. For backwards 950 * compatibility reasons we treat empty flags as SVE registers. 951 */ 952 953 /* 954 * If setting a different VL from the requested VL and there is 955 * register data, the data layout will be wrong: don't even 956 * try to set the registers in this case. 957 */ 958 if (count && vq != sve_vq_from_vl(header.vl)) { 959 ret = -EIO; 960 goto out; 961 } 962 963 sve_alloc(target, true); 964 if (!target->thread.sve_state) { 965 ret = -ENOMEM; 966 clear_tsk_thread_flag(target, TIF_SVE); 967 target->thread.fp_type = FP_STATE_FPSIMD; 968 goto out; 969 } 970 971 /* 972 * Ensure target->thread.sve_state is up to date with target's 973 * FPSIMD regs, so that a short copyin leaves trailing 974 * registers unmodified. Only enable SVE if we are 975 * configuring normal SVE, a system with streaming SVE may not 976 * have normal SVE. 977 */ 978 fpsimd_sync_to_sve(target); 979 if (type == ARM64_VEC_SVE) 980 set_tsk_thread_flag(target, TIF_SVE); 981 target->thread.fp_type = FP_STATE_SVE; 982 983 BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 984 start = SVE_PT_SVE_OFFSET; 985 end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 986 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 987 target->thread.sve_state, 988 start, end); 989 if (ret) 990 goto out; 991 992 start = end; 993 end = SVE_PT_SVE_FPSR_OFFSET(vq); 994 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end); 995 996 /* 997 * Copy fpsr, and fpcr which must follow contiguously in 998 * struct fpsimd_state: 999 */ 1000 start = end; 1001 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 1002 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1003 &target->thread.uw.fpsimd_state.fpsr, 1004 start, end); 1005 1006 out: 1007 fpsimd_flush_task_state(target); 1008 return ret; 1009 } 1010 1011 static int sve_set(struct task_struct *target, 1012 const struct user_regset *regset, 1013 unsigned int pos, unsigned int count, 1014 const void *kbuf, const void __user *ubuf) 1015 { 1016 if (!system_supports_sve()) 1017 return -EINVAL; 1018 1019 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1020 ARM64_VEC_SVE); 1021 } 1022 1023 #endif /* CONFIG_ARM64_SVE */ 1024 1025 #ifdef CONFIG_ARM64_SME 1026 1027 static int ssve_get(struct task_struct *target, 1028 const struct user_regset *regset, 1029 struct membuf to) 1030 { 1031 if (!system_supports_sme()) 1032 return -EINVAL; 1033 1034 return sve_get_common(target, regset, to, ARM64_VEC_SME); 1035 } 1036 1037 static int ssve_set(struct task_struct *target, 1038 const struct user_regset *regset, 1039 unsigned int pos, unsigned int count, 1040 const void *kbuf, const void __user *ubuf) 1041 { 1042 if (!system_supports_sme()) 1043 return -EINVAL; 1044 1045 return sve_set_common(target, regset, pos, count, kbuf, ubuf, 1046 ARM64_VEC_SME); 1047 } 1048 1049 static int za_get(struct task_struct *target, 1050 const struct user_regset *regset, 1051 struct membuf to) 1052 { 1053 struct user_za_header header; 1054 unsigned int vq; 1055 unsigned long start, end; 1056 1057 if (!system_supports_sme()) 1058 return -EINVAL; 1059 1060 /* Header */ 1061 memset(&header, 0, sizeof(header)); 1062 1063 if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) 1064 header.flags |= ZA_PT_VL_INHERIT; 1065 1066 header.vl = task_get_sme_vl(target); 1067 vq = sve_vq_from_vl(header.vl); 1068 header.max_vl = sme_max_vl(); 1069 header.max_size = ZA_PT_SIZE(vq); 1070 1071 /* If ZA is not active there is only the header */ 1072 if (thread_za_enabled(&target->thread)) 1073 header.size = ZA_PT_SIZE(vq); 1074 else 1075 header.size = ZA_PT_ZA_OFFSET; 1076 1077 membuf_write(&to, &header, sizeof(header)); 1078 1079 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1080 end = ZA_PT_ZA_OFFSET; 1081 1082 if (target == current) 1083 fpsimd_preserve_current_state(); 1084 1085 /* Any register data to include? */ 1086 if (thread_za_enabled(&target->thread)) { 1087 start = end; 1088 end = ZA_PT_SIZE(vq); 1089 membuf_write(&to, target->thread.sme_state, end - start); 1090 } 1091 1092 /* Zero any trailing padding */ 1093 start = end; 1094 end = ALIGN(header.size, SVE_VQ_BYTES); 1095 return membuf_zero(&to, end - start); 1096 } 1097 1098 static int za_set(struct task_struct *target, 1099 const struct user_regset *regset, 1100 unsigned int pos, unsigned int count, 1101 const void *kbuf, const void __user *ubuf) 1102 { 1103 int ret; 1104 struct user_za_header header; 1105 unsigned int vq; 1106 unsigned long start, end; 1107 1108 if (!system_supports_sme()) 1109 return -EINVAL; 1110 1111 /* Header */ 1112 if (count < sizeof(header)) 1113 return -EINVAL; 1114 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 1115 0, sizeof(header)); 1116 if (ret) 1117 goto out; 1118 1119 /* 1120 * All current ZA_PT_* flags are consumed by 1121 * vec_set_vector_length(), which will also validate them for 1122 * us: 1123 */ 1124 ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, 1125 ((unsigned long)header.flags) << 16); 1126 if (ret) 1127 goto out; 1128 1129 /* Actual VL set may be less than the user asked for: */ 1130 vq = sve_vq_from_vl(task_get_sme_vl(target)); 1131 1132 /* Ensure there is some SVE storage for streaming mode */ 1133 if (!target->thread.sve_state) { 1134 sve_alloc(target, false); 1135 if (!target->thread.sve_state) { 1136 ret = -ENOMEM; 1137 goto out; 1138 } 1139 } 1140 1141 /* 1142 * Only flush the storage if PSTATE.ZA was not already set, 1143 * otherwise preserve any existing data. 1144 */ 1145 sme_alloc(target, !thread_za_enabled(&target->thread)); 1146 if (!target->thread.sme_state) 1147 return -ENOMEM; 1148 1149 /* If there is no data then disable ZA */ 1150 if (!count) { 1151 target->thread.svcr &= ~SVCR_ZA_MASK; 1152 goto out; 1153 } 1154 1155 /* 1156 * If setting a different VL from the requested VL and there is 1157 * register data, the data layout will be wrong: don't even 1158 * try to set the registers in this case. 1159 */ 1160 if (vq != sve_vq_from_vl(header.vl)) { 1161 ret = -EIO; 1162 goto out; 1163 } 1164 1165 BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); 1166 start = ZA_PT_ZA_OFFSET; 1167 end = ZA_PT_SIZE(vq); 1168 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1169 target->thread.sme_state, 1170 start, end); 1171 if (ret) 1172 goto out; 1173 1174 /* Mark ZA as active and let userspace use it */ 1175 set_tsk_thread_flag(target, TIF_SME); 1176 target->thread.svcr |= SVCR_ZA_MASK; 1177 1178 out: 1179 fpsimd_flush_task_state(target); 1180 return ret; 1181 } 1182 1183 static int zt_get(struct task_struct *target, 1184 const struct user_regset *regset, 1185 struct membuf to) 1186 { 1187 if (!system_supports_sme2()) 1188 return -EINVAL; 1189 1190 /* 1191 * If PSTATE.ZA is not set then ZT will be zeroed when it is 1192 * enabled so report the current register value as zero. 1193 */ 1194 if (thread_za_enabled(&target->thread)) 1195 membuf_write(&to, thread_zt_state(&target->thread), 1196 ZT_SIG_REG_BYTES); 1197 else 1198 membuf_zero(&to, ZT_SIG_REG_BYTES); 1199 1200 return 0; 1201 } 1202 1203 static int zt_set(struct task_struct *target, 1204 const struct user_regset *regset, 1205 unsigned int pos, unsigned int count, 1206 const void *kbuf, const void __user *ubuf) 1207 { 1208 int ret; 1209 1210 if (!system_supports_sme2()) 1211 return -EINVAL; 1212 1213 /* Ensure SVE storage in case this is first use of SME */ 1214 sve_alloc(target, false); 1215 if (!target->thread.sve_state) 1216 return -ENOMEM; 1217 1218 if (!thread_za_enabled(&target->thread)) { 1219 sme_alloc(target, true); 1220 if (!target->thread.sme_state) 1221 return -ENOMEM; 1222 } 1223 1224 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1225 thread_zt_state(&target->thread), 1226 0, ZT_SIG_REG_BYTES); 1227 if (ret == 0) { 1228 target->thread.svcr |= SVCR_ZA_MASK; 1229 set_tsk_thread_flag(target, TIF_SME); 1230 } 1231 1232 fpsimd_flush_task_state(target); 1233 1234 return ret; 1235 } 1236 1237 #endif /* CONFIG_ARM64_SME */ 1238 1239 #ifdef CONFIG_ARM64_PTR_AUTH 1240 static int pac_mask_get(struct task_struct *target, 1241 const struct user_regset *regset, 1242 struct membuf to) 1243 { 1244 /* 1245 * The PAC bits can differ across data and instruction pointers 1246 * depending on TCR_EL1.TBID*, which we may make use of in future, so 1247 * we expose separate masks. 1248 */ 1249 unsigned long mask = ptrauth_user_pac_mask(); 1250 struct user_pac_mask uregs = { 1251 .data_mask = mask, 1252 .insn_mask = mask, 1253 }; 1254 1255 if (!system_supports_address_auth()) 1256 return -EINVAL; 1257 1258 return membuf_write(&to, &uregs, sizeof(uregs)); 1259 } 1260 1261 static int pac_enabled_keys_get(struct task_struct *target, 1262 const struct user_regset *regset, 1263 struct membuf to) 1264 { 1265 long enabled_keys = ptrauth_get_enabled_keys(target); 1266 1267 if (IS_ERR_VALUE(enabled_keys)) 1268 return enabled_keys; 1269 1270 return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); 1271 } 1272 1273 static int pac_enabled_keys_set(struct task_struct *target, 1274 const struct user_regset *regset, 1275 unsigned int pos, unsigned int count, 1276 const void *kbuf, const void __user *ubuf) 1277 { 1278 int ret; 1279 long enabled_keys = ptrauth_get_enabled_keys(target); 1280 1281 if (IS_ERR_VALUE(enabled_keys)) 1282 return enabled_keys; 1283 1284 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, 1285 sizeof(long)); 1286 if (ret) 1287 return ret; 1288 1289 return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, 1290 enabled_keys); 1291 } 1292 1293 #ifdef CONFIG_CHECKPOINT_RESTORE 1294 static __uint128_t pac_key_to_user(const struct ptrauth_key *key) 1295 { 1296 return (__uint128_t)key->hi << 64 | key->lo; 1297 } 1298 1299 static struct ptrauth_key pac_key_from_user(__uint128_t ukey) 1300 { 1301 struct ptrauth_key key = { 1302 .lo = (unsigned long)ukey, 1303 .hi = (unsigned long)(ukey >> 64), 1304 }; 1305 1306 return key; 1307 } 1308 1309 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, 1310 const struct ptrauth_keys_user *keys) 1311 { 1312 ukeys->apiakey = pac_key_to_user(&keys->apia); 1313 ukeys->apibkey = pac_key_to_user(&keys->apib); 1314 ukeys->apdakey = pac_key_to_user(&keys->apda); 1315 ukeys->apdbkey = pac_key_to_user(&keys->apdb); 1316 } 1317 1318 static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, 1319 const struct user_pac_address_keys *ukeys) 1320 { 1321 keys->apia = pac_key_from_user(ukeys->apiakey); 1322 keys->apib = pac_key_from_user(ukeys->apibkey); 1323 keys->apda = pac_key_from_user(ukeys->apdakey); 1324 keys->apdb = pac_key_from_user(ukeys->apdbkey); 1325 } 1326 1327 static int pac_address_keys_get(struct task_struct *target, 1328 const struct user_regset *regset, 1329 struct membuf to) 1330 { 1331 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1332 struct user_pac_address_keys user_keys; 1333 1334 if (!system_supports_address_auth()) 1335 return -EINVAL; 1336 1337 pac_address_keys_to_user(&user_keys, keys); 1338 1339 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1340 } 1341 1342 static int pac_address_keys_set(struct task_struct *target, 1343 const struct user_regset *regset, 1344 unsigned int pos, unsigned int count, 1345 const void *kbuf, const void __user *ubuf) 1346 { 1347 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1348 struct user_pac_address_keys user_keys; 1349 int ret; 1350 1351 if (!system_supports_address_auth()) 1352 return -EINVAL; 1353 1354 pac_address_keys_to_user(&user_keys, keys); 1355 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1356 &user_keys, 0, -1); 1357 if (ret) 1358 return ret; 1359 pac_address_keys_from_user(keys, &user_keys); 1360 1361 return 0; 1362 } 1363 1364 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, 1365 const struct ptrauth_keys_user *keys) 1366 { 1367 ukeys->apgakey = pac_key_to_user(&keys->apga); 1368 } 1369 1370 static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, 1371 const struct user_pac_generic_keys *ukeys) 1372 { 1373 keys->apga = pac_key_from_user(ukeys->apgakey); 1374 } 1375 1376 static int pac_generic_keys_get(struct task_struct *target, 1377 const struct user_regset *regset, 1378 struct membuf to) 1379 { 1380 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1381 struct user_pac_generic_keys user_keys; 1382 1383 if (!system_supports_generic_auth()) 1384 return -EINVAL; 1385 1386 pac_generic_keys_to_user(&user_keys, keys); 1387 1388 return membuf_write(&to, &user_keys, sizeof(user_keys)); 1389 } 1390 1391 static int pac_generic_keys_set(struct task_struct *target, 1392 const struct user_regset *regset, 1393 unsigned int pos, unsigned int count, 1394 const void *kbuf, const void __user *ubuf) 1395 { 1396 struct ptrauth_keys_user *keys = &target->thread.keys_user; 1397 struct user_pac_generic_keys user_keys; 1398 int ret; 1399 1400 if (!system_supports_generic_auth()) 1401 return -EINVAL; 1402 1403 pac_generic_keys_to_user(&user_keys, keys); 1404 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1405 &user_keys, 0, -1); 1406 if (ret) 1407 return ret; 1408 pac_generic_keys_from_user(keys, &user_keys); 1409 1410 return 0; 1411 } 1412 #endif /* CONFIG_CHECKPOINT_RESTORE */ 1413 #endif /* CONFIG_ARM64_PTR_AUTH */ 1414 1415 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1416 static int tagged_addr_ctrl_get(struct task_struct *target, 1417 const struct user_regset *regset, 1418 struct membuf to) 1419 { 1420 long ctrl = get_tagged_addr_ctrl(target); 1421 1422 if (IS_ERR_VALUE(ctrl)) 1423 return ctrl; 1424 1425 return membuf_write(&to, &ctrl, sizeof(ctrl)); 1426 } 1427 1428 static int tagged_addr_ctrl_set(struct task_struct *target, const struct 1429 user_regset *regset, unsigned int pos, 1430 unsigned int count, const void *kbuf, const 1431 void __user *ubuf) 1432 { 1433 int ret; 1434 long ctrl; 1435 1436 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1437 if (ret) 1438 return ret; 1439 1440 return set_tagged_addr_ctrl(target, ctrl); 1441 } 1442 #endif 1443 1444 #ifdef CONFIG_ARM64_POE 1445 static int poe_get(struct task_struct *target, 1446 const struct user_regset *regset, 1447 struct membuf to) 1448 { 1449 if (!system_supports_poe()) 1450 return -EINVAL; 1451 1452 return membuf_write(&to, &target->thread.por_el0, 1453 sizeof(target->thread.por_el0)); 1454 } 1455 1456 static int poe_set(struct task_struct *target, const struct 1457 user_regset *regset, unsigned int pos, 1458 unsigned int count, const void *kbuf, const 1459 void __user *ubuf) 1460 { 1461 int ret; 1462 long ctrl; 1463 1464 if (!system_supports_poe()) 1465 return -EINVAL; 1466 1467 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); 1468 if (ret) 1469 return ret; 1470 1471 target->thread.por_el0 = ctrl; 1472 1473 return 0; 1474 } 1475 #endif 1476 1477 #ifdef CONFIG_ARM64_GCS 1478 static int gcs_get(struct task_struct *target, 1479 const struct user_regset *regset, 1480 struct membuf to) 1481 { 1482 struct user_gcs user_gcs; 1483 1484 if (!system_supports_gcs()) 1485 return -EINVAL; 1486 1487 if (target == current) 1488 gcs_preserve_current_state(); 1489 1490 user_gcs.features_enabled = target->thread.gcs_el0_mode; 1491 user_gcs.features_locked = target->thread.gcs_el0_locked; 1492 user_gcs.gcspr_el0 = target->thread.gcspr_el0; 1493 1494 return membuf_write(&to, &user_gcs, sizeof(user_gcs)); 1495 } 1496 1497 static int gcs_set(struct task_struct *target, const struct 1498 user_regset *regset, unsigned int pos, 1499 unsigned int count, const void *kbuf, const 1500 void __user *ubuf) 1501 { 1502 int ret; 1503 struct user_gcs user_gcs; 1504 1505 if (!system_supports_gcs()) 1506 return -EINVAL; 1507 1508 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1); 1509 if (ret) 1510 return ret; 1511 1512 if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK) 1513 return -EINVAL; 1514 1515 target->thread.gcs_el0_mode = user_gcs.features_enabled; 1516 target->thread.gcs_el0_locked = user_gcs.features_locked; 1517 target->thread.gcspr_el0 = user_gcs.gcspr_el0; 1518 1519 return 0; 1520 } 1521 #endif 1522 1523 enum aarch64_regset { 1524 REGSET_GPR, 1525 REGSET_FPR, 1526 REGSET_TLS, 1527 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1528 REGSET_HW_BREAK, 1529 REGSET_HW_WATCH, 1530 #endif 1531 REGSET_FPMR, 1532 REGSET_SYSTEM_CALL, 1533 #ifdef CONFIG_ARM64_SVE 1534 REGSET_SVE, 1535 #endif 1536 #ifdef CONFIG_ARM64_SME 1537 REGSET_SSVE, 1538 REGSET_ZA, 1539 REGSET_ZT, 1540 #endif 1541 #ifdef CONFIG_ARM64_PTR_AUTH 1542 REGSET_PAC_MASK, 1543 REGSET_PAC_ENABLED_KEYS, 1544 #ifdef CONFIG_CHECKPOINT_RESTORE 1545 REGSET_PACA_KEYS, 1546 REGSET_PACG_KEYS, 1547 #endif 1548 #endif 1549 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1550 REGSET_TAGGED_ADDR_CTRL, 1551 #endif 1552 #ifdef CONFIG_ARM64_POE 1553 REGSET_POE, 1554 #endif 1555 #ifdef CONFIG_ARM64_GCS 1556 REGSET_GCS, 1557 #endif 1558 }; 1559 1560 static const struct user_regset aarch64_regsets[] = { 1561 [REGSET_GPR] = { 1562 .core_note_type = NT_PRSTATUS, 1563 .n = sizeof(struct user_pt_regs) / sizeof(u64), 1564 .size = sizeof(u64), 1565 .align = sizeof(u64), 1566 .regset_get = gpr_get, 1567 .set = gpr_set 1568 }, 1569 [REGSET_FPR] = { 1570 .core_note_type = NT_PRFPREG, 1571 .n = sizeof(struct user_fpsimd_state) / sizeof(u32), 1572 /* 1573 * We pretend we have 32-bit registers because the fpsr and 1574 * fpcr are 32-bits wide. 1575 */ 1576 .size = sizeof(u32), 1577 .align = sizeof(u32), 1578 .active = fpr_active, 1579 .regset_get = fpr_get, 1580 .set = fpr_set 1581 }, 1582 [REGSET_TLS] = { 1583 .core_note_type = NT_ARM_TLS, 1584 .n = 2, 1585 .size = sizeof(void *), 1586 .align = sizeof(void *), 1587 .regset_get = tls_get, 1588 .set = tls_set, 1589 }, 1590 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1591 [REGSET_HW_BREAK] = { 1592 .core_note_type = NT_ARM_HW_BREAK, 1593 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1594 .size = sizeof(u32), 1595 .align = sizeof(u32), 1596 .regset_get = hw_break_get, 1597 .set = hw_break_set, 1598 }, 1599 [REGSET_HW_WATCH] = { 1600 .core_note_type = NT_ARM_HW_WATCH, 1601 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1602 .size = sizeof(u32), 1603 .align = sizeof(u32), 1604 .regset_get = hw_break_get, 1605 .set = hw_break_set, 1606 }, 1607 #endif 1608 [REGSET_SYSTEM_CALL] = { 1609 .core_note_type = NT_ARM_SYSTEM_CALL, 1610 .n = 1, 1611 .size = sizeof(int), 1612 .align = sizeof(int), 1613 .regset_get = system_call_get, 1614 .set = system_call_set, 1615 }, 1616 [REGSET_FPMR] = { 1617 .core_note_type = NT_ARM_FPMR, 1618 .n = 1, 1619 .size = sizeof(u64), 1620 .align = sizeof(u64), 1621 .regset_get = fpmr_get, 1622 .set = fpmr_set, 1623 }, 1624 #ifdef CONFIG_ARM64_SVE 1625 [REGSET_SVE] = { /* Scalable Vector Extension */ 1626 .core_note_type = NT_ARM_SVE, 1627 .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX, 1628 SVE_PT_REGS_SVE), 1629 SVE_VQ_BYTES), 1630 .size = SVE_VQ_BYTES, 1631 .align = SVE_VQ_BYTES, 1632 .regset_get = sve_get, 1633 .set = sve_set, 1634 }, 1635 #endif 1636 #ifdef CONFIG_ARM64_SME 1637 [REGSET_SSVE] = { /* Streaming mode SVE */ 1638 .core_note_type = NT_ARM_SSVE, 1639 .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), 1640 SVE_VQ_BYTES), 1641 .size = SVE_VQ_BYTES, 1642 .align = SVE_VQ_BYTES, 1643 .regset_get = ssve_get, 1644 .set = ssve_set, 1645 }, 1646 [REGSET_ZA] = { /* SME ZA */ 1647 .core_note_type = NT_ARM_ZA, 1648 /* 1649 * ZA is a single register but it's variably sized and 1650 * the ptrace core requires that the size of any data 1651 * be an exact multiple of the configured register 1652 * size so report as though we had SVE_VQ_BYTES 1653 * registers. These values aren't exposed to 1654 * userspace. 1655 */ 1656 .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), 1657 .size = SVE_VQ_BYTES, 1658 .align = SVE_VQ_BYTES, 1659 .regset_get = za_get, 1660 .set = za_set, 1661 }, 1662 [REGSET_ZT] = { /* SME ZT */ 1663 .core_note_type = NT_ARM_ZT, 1664 .n = 1, 1665 .size = ZT_SIG_REG_BYTES, 1666 .align = sizeof(u64), 1667 .regset_get = zt_get, 1668 .set = zt_set, 1669 }, 1670 #endif 1671 #ifdef CONFIG_ARM64_PTR_AUTH 1672 [REGSET_PAC_MASK] = { 1673 .core_note_type = NT_ARM_PAC_MASK, 1674 .n = sizeof(struct user_pac_mask) / sizeof(u64), 1675 .size = sizeof(u64), 1676 .align = sizeof(u64), 1677 .regset_get = pac_mask_get, 1678 /* this cannot be set dynamically */ 1679 }, 1680 [REGSET_PAC_ENABLED_KEYS] = { 1681 .core_note_type = NT_ARM_PAC_ENABLED_KEYS, 1682 .n = 1, 1683 .size = sizeof(long), 1684 .align = sizeof(long), 1685 .regset_get = pac_enabled_keys_get, 1686 .set = pac_enabled_keys_set, 1687 }, 1688 #ifdef CONFIG_CHECKPOINT_RESTORE 1689 [REGSET_PACA_KEYS] = { 1690 .core_note_type = NT_ARM_PACA_KEYS, 1691 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), 1692 .size = sizeof(__uint128_t), 1693 .align = sizeof(__uint128_t), 1694 .regset_get = pac_address_keys_get, 1695 .set = pac_address_keys_set, 1696 }, 1697 [REGSET_PACG_KEYS] = { 1698 .core_note_type = NT_ARM_PACG_KEYS, 1699 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), 1700 .size = sizeof(__uint128_t), 1701 .align = sizeof(__uint128_t), 1702 .regset_get = pac_generic_keys_get, 1703 .set = pac_generic_keys_set, 1704 }, 1705 #endif 1706 #endif 1707 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI 1708 [REGSET_TAGGED_ADDR_CTRL] = { 1709 .core_note_type = NT_ARM_TAGGED_ADDR_CTRL, 1710 .n = 1, 1711 .size = sizeof(long), 1712 .align = sizeof(long), 1713 .regset_get = tagged_addr_ctrl_get, 1714 .set = tagged_addr_ctrl_set, 1715 }, 1716 #endif 1717 #ifdef CONFIG_ARM64_POE 1718 [REGSET_POE] = { 1719 .core_note_type = NT_ARM_POE, 1720 .n = 1, 1721 .size = sizeof(long), 1722 .align = sizeof(long), 1723 .regset_get = poe_get, 1724 .set = poe_set, 1725 }, 1726 #endif 1727 #ifdef CONFIG_ARM64_GCS 1728 [REGSET_GCS] = { 1729 .core_note_type = NT_ARM_GCS, 1730 .n = sizeof(struct user_gcs) / sizeof(u64), 1731 .size = sizeof(u64), 1732 .align = sizeof(u64), 1733 .regset_get = gcs_get, 1734 .set = gcs_set, 1735 }, 1736 #endif 1737 }; 1738 1739 static const struct user_regset_view user_aarch64_view = { 1740 .name = "aarch64", .e_machine = EM_AARCH64, 1741 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) 1742 }; 1743 1744 enum compat_regset { 1745 REGSET_COMPAT_GPR, 1746 REGSET_COMPAT_VFP, 1747 }; 1748 1749 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) 1750 { 1751 struct pt_regs *regs = task_pt_regs(task); 1752 1753 switch (idx) { 1754 case 15: 1755 return regs->pc; 1756 case 16: 1757 return pstate_to_compat_psr(regs->pstate); 1758 case 17: 1759 return regs->orig_x0; 1760 default: 1761 return regs->regs[idx]; 1762 } 1763 } 1764 1765 static int compat_gpr_get(struct task_struct *target, 1766 const struct user_regset *regset, 1767 struct membuf to) 1768 { 1769 int i = 0; 1770 1771 while (to.left) 1772 membuf_store(&to, compat_get_user_reg(target, i++)); 1773 return 0; 1774 } 1775 1776 static int compat_gpr_set(struct task_struct *target, 1777 const struct user_regset *regset, 1778 unsigned int pos, unsigned int count, 1779 const void *kbuf, const void __user *ubuf) 1780 { 1781 struct pt_regs newregs; 1782 int ret = 0; 1783 unsigned int i, start, num_regs; 1784 1785 /* Calculate the number of AArch32 registers contained in count */ 1786 num_regs = count / regset->size; 1787 1788 /* Convert pos into an register number */ 1789 start = pos / regset->size; 1790 1791 if (start + num_regs > regset->n) 1792 return -EIO; 1793 1794 newregs = *task_pt_regs(target); 1795 1796 for (i = 0; i < num_regs; ++i) { 1797 unsigned int idx = start + i; 1798 compat_ulong_t reg; 1799 1800 if (kbuf) { 1801 memcpy(®, kbuf, sizeof(reg)); 1802 kbuf += sizeof(reg); 1803 } else { 1804 ret = copy_from_user(®, ubuf, sizeof(reg)); 1805 if (ret) { 1806 ret = -EFAULT; 1807 break; 1808 } 1809 1810 ubuf += sizeof(reg); 1811 } 1812 1813 switch (idx) { 1814 case 15: 1815 newregs.pc = reg; 1816 break; 1817 case 16: 1818 reg = compat_psr_to_pstate(reg); 1819 newregs.pstate = reg; 1820 break; 1821 case 17: 1822 newregs.orig_x0 = reg; 1823 break; 1824 default: 1825 newregs.regs[idx] = reg; 1826 } 1827 1828 } 1829 1830 if (valid_user_regs(&newregs.user_regs, target)) 1831 *task_pt_regs(target) = newregs; 1832 else 1833 ret = -EINVAL; 1834 1835 return ret; 1836 } 1837 1838 static int compat_vfp_get(struct task_struct *target, 1839 const struct user_regset *regset, 1840 struct membuf to) 1841 { 1842 struct user_fpsimd_state *uregs; 1843 compat_ulong_t fpscr; 1844 1845 if (!system_supports_fpsimd()) 1846 return -EINVAL; 1847 1848 uregs = &target->thread.uw.fpsimd_state; 1849 1850 if (target == current) 1851 fpsimd_preserve_current_state(); 1852 1853 /* 1854 * The VFP registers are packed into the fpsimd_state, so they all sit 1855 * nicely together for us. We just need to create the fpscr separately. 1856 */ 1857 membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t)); 1858 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | 1859 (uregs->fpcr & VFP_FPSCR_CTRL_MASK); 1860 return membuf_store(&to, fpscr); 1861 } 1862 1863 static int compat_vfp_set(struct task_struct *target, 1864 const struct user_regset *regset, 1865 unsigned int pos, unsigned int count, 1866 const void *kbuf, const void __user *ubuf) 1867 { 1868 struct user_fpsimd_state *uregs; 1869 compat_ulong_t fpscr; 1870 int ret, vregs_end_pos; 1871 1872 if (!system_supports_fpsimd()) 1873 return -EINVAL; 1874 1875 uregs = &target->thread.uw.fpsimd_state; 1876 1877 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); 1878 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, 1879 vregs_end_pos); 1880 1881 if (count && !ret) { 1882 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, 1883 vregs_end_pos, VFP_STATE_SIZE); 1884 if (!ret) { 1885 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; 1886 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; 1887 } 1888 } 1889 1890 fpsimd_flush_task_state(target); 1891 return ret; 1892 } 1893 1894 static int compat_tls_get(struct task_struct *target, 1895 const struct user_regset *regset, 1896 struct membuf to) 1897 { 1898 return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value); 1899 } 1900 1901 static int compat_tls_set(struct task_struct *target, 1902 const struct user_regset *regset, unsigned int pos, 1903 unsigned int count, const void *kbuf, 1904 const void __user *ubuf) 1905 { 1906 int ret; 1907 compat_ulong_t tls = target->thread.uw.tp_value; 1908 1909 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 1910 if (ret) 1911 return ret; 1912 1913 target->thread.uw.tp_value = tls; 1914 return ret; 1915 } 1916 1917 static const struct user_regset aarch32_regsets[] = { 1918 [REGSET_COMPAT_GPR] = { 1919 .core_note_type = NT_PRSTATUS, 1920 .n = COMPAT_ELF_NGREG, 1921 .size = sizeof(compat_elf_greg_t), 1922 .align = sizeof(compat_elf_greg_t), 1923 .regset_get = compat_gpr_get, 1924 .set = compat_gpr_set 1925 }, 1926 [REGSET_COMPAT_VFP] = { 1927 .core_note_type = NT_ARM_VFP, 1928 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 1929 .size = sizeof(compat_ulong_t), 1930 .align = sizeof(compat_ulong_t), 1931 .active = fpr_active, 1932 .regset_get = compat_vfp_get, 1933 .set = compat_vfp_set 1934 }, 1935 }; 1936 1937 static const struct user_regset_view user_aarch32_view = { 1938 .name = "aarch32", .e_machine = EM_ARM, 1939 .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) 1940 }; 1941 1942 static const struct user_regset aarch32_ptrace_regsets[] = { 1943 [REGSET_GPR] = { 1944 .core_note_type = NT_PRSTATUS, 1945 .n = COMPAT_ELF_NGREG, 1946 .size = sizeof(compat_elf_greg_t), 1947 .align = sizeof(compat_elf_greg_t), 1948 .regset_get = compat_gpr_get, 1949 .set = compat_gpr_set 1950 }, 1951 [REGSET_FPR] = { 1952 .core_note_type = NT_ARM_VFP, 1953 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), 1954 .size = sizeof(compat_ulong_t), 1955 .align = sizeof(compat_ulong_t), 1956 .regset_get = compat_vfp_get, 1957 .set = compat_vfp_set 1958 }, 1959 [REGSET_TLS] = { 1960 .core_note_type = NT_ARM_TLS, 1961 .n = 1, 1962 .size = sizeof(compat_ulong_t), 1963 .align = sizeof(compat_ulong_t), 1964 .regset_get = compat_tls_get, 1965 .set = compat_tls_set, 1966 }, 1967 #ifdef CONFIG_HAVE_HW_BREAKPOINT 1968 [REGSET_HW_BREAK] = { 1969 .core_note_type = NT_ARM_HW_BREAK, 1970 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1971 .size = sizeof(u32), 1972 .align = sizeof(u32), 1973 .regset_get = hw_break_get, 1974 .set = hw_break_set, 1975 }, 1976 [REGSET_HW_WATCH] = { 1977 .core_note_type = NT_ARM_HW_WATCH, 1978 .n = sizeof(struct user_hwdebug_state) / sizeof(u32), 1979 .size = sizeof(u32), 1980 .align = sizeof(u32), 1981 .regset_get = hw_break_get, 1982 .set = hw_break_set, 1983 }, 1984 #endif 1985 [REGSET_SYSTEM_CALL] = { 1986 .core_note_type = NT_ARM_SYSTEM_CALL, 1987 .n = 1, 1988 .size = sizeof(int), 1989 .align = sizeof(int), 1990 .regset_get = system_call_get, 1991 .set = system_call_set, 1992 }, 1993 }; 1994 1995 static const struct user_regset_view user_aarch32_ptrace_view = { 1996 .name = "aarch32", .e_machine = EM_ARM, 1997 .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) 1998 }; 1999 2000 #ifdef CONFIG_COMPAT 2001 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, 2002 compat_ulong_t __user *ret) 2003 { 2004 compat_ulong_t tmp; 2005 2006 if (off & 3) 2007 return -EIO; 2008 2009 if (off == COMPAT_PT_TEXT_ADDR) 2010 tmp = tsk->mm->start_code; 2011 else if (off == COMPAT_PT_DATA_ADDR) 2012 tmp = tsk->mm->start_data; 2013 else if (off == COMPAT_PT_TEXT_END_ADDR) 2014 tmp = tsk->mm->end_code; 2015 else if (off < sizeof(compat_elf_gregset_t)) 2016 tmp = compat_get_user_reg(tsk, off >> 2); 2017 else if (off >= COMPAT_USER_SZ) 2018 return -EIO; 2019 else 2020 tmp = 0; 2021 2022 return put_user(tmp, ret); 2023 } 2024 2025 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, 2026 compat_ulong_t val) 2027 { 2028 struct pt_regs newregs = *task_pt_regs(tsk); 2029 unsigned int idx = off / 4; 2030 2031 if (off & 3 || off >= COMPAT_USER_SZ) 2032 return -EIO; 2033 2034 if (off >= sizeof(compat_elf_gregset_t)) 2035 return 0; 2036 2037 switch (idx) { 2038 case 15: 2039 newregs.pc = val; 2040 break; 2041 case 16: 2042 newregs.pstate = compat_psr_to_pstate(val); 2043 break; 2044 case 17: 2045 newregs.orig_x0 = val; 2046 break; 2047 default: 2048 newregs.regs[idx] = val; 2049 } 2050 2051 if (!valid_user_regs(&newregs.user_regs, tsk)) 2052 return -EINVAL; 2053 2054 *task_pt_regs(tsk) = newregs; 2055 return 0; 2056 } 2057 2058 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2059 2060 /* 2061 * Convert a virtual register number into an index for a thread_info 2062 * breakpoint array. Breakpoints are identified using positive numbers 2063 * whilst watchpoints are negative. The registers are laid out as pairs 2064 * of (address, control), each pair mapping to a unique hw_breakpoint struct. 2065 * Register 0 is reserved for describing resource information. 2066 */ 2067 static int compat_ptrace_hbp_num_to_idx(compat_long_t num) 2068 { 2069 return (abs(num) - 1) >> 1; 2070 } 2071 2072 static int compat_ptrace_hbp_get_resource_info(u32 *kdata) 2073 { 2074 u8 num_brps, num_wrps, debug_arch, wp_len; 2075 u32 reg = 0; 2076 2077 num_brps = hw_breakpoint_slots(TYPE_INST); 2078 num_wrps = hw_breakpoint_slots(TYPE_DATA); 2079 2080 debug_arch = debug_monitors_arch(); 2081 wp_len = 8; 2082 reg |= debug_arch; 2083 reg <<= 8; 2084 reg |= wp_len; 2085 reg <<= 8; 2086 reg |= num_wrps; 2087 reg <<= 8; 2088 reg |= num_brps; 2089 2090 *kdata = reg; 2091 return 0; 2092 } 2093 2094 static int compat_ptrace_hbp_get(unsigned int note_type, 2095 struct task_struct *tsk, 2096 compat_long_t num, 2097 u32 *kdata) 2098 { 2099 u64 addr = 0; 2100 u32 ctrl = 0; 2101 2102 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2103 2104 if (num & 1) { 2105 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); 2106 *kdata = (u32)addr; 2107 } else { 2108 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); 2109 *kdata = ctrl; 2110 } 2111 2112 return err; 2113 } 2114 2115 static int compat_ptrace_hbp_set(unsigned int note_type, 2116 struct task_struct *tsk, 2117 compat_long_t num, 2118 u32 *kdata) 2119 { 2120 u64 addr; 2121 u32 ctrl; 2122 2123 int err, idx = compat_ptrace_hbp_num_to_idx(num); 2124 2125 if (num & 1) { 2126 addr = *kdata; 2127 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); 2128 } else { 2129 ctrl = *kdata; 2130 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); 2131 } 2132 2133 return err; 2134 } 2135 2136 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, 2137 compat_ulong_t __user *data) 2138 { 2139 int ret; 2140 u32 kdata; 2141 2142 /* Watchpoint */ 2143 if (num < 0) { 2144 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); 2145 /* Resource info */ 2146 } else if (num == 0) { 2147 ret = compat_ptrace_hbp_get_resource_info(&kdata); 2148 /* Breakpoint */ 2149 } else { 2150 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 2151 } 2152 2153 if (!ret) 2154 ret = put_user(kdata, data); 2155 2156 return ret; 2157 } 2158 2159 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, 2160 compat_ulong_t __user *data) 2161 { 2162 int ret; 2163 u32 kdata = 0; 2164 2165 if (num == 0) 2166 return 0; 2167 2168 ret = get_user(kdata, data); 2169 if (ret) 2170 return ret; 2171 2172 if (num < 0) 2173 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 2174 else 2175 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 2176 2177 return ret; 2178 } 2179 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 2180 2181 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 2182 compat_ulong_t caddr, compat_ulong_t cdata) 2183 { 2184 unsigned long addr = caddr; 2185 unsigned long data = cdata; 2186 void __user *datap = compat_ptr(data); 2187 int ret; 2188 2189 switch (request) { 2190 case PTRACE_PEEKUSR: 2191 ret = compat_ptrace_read_user(child, addr, datap); 2192 break; 2193 2194 case PTRACE_POKEUSR: 2195 ret = compat_ptrace_write_user(child, addr, data); 2196 break; 2197 2198 case COMPAT_PTRACE_GETREGS: 2199 ret = copy_regset_to_user(child, 2200 &user_aarch32_view, 2201 REGSET_COMPAT_GPR, 2202 0, sizeof(compat_elf_gregset_t), 2203 datap); 2204 break; 2205 2206 case COMPAT_PTRACE_SETREGS: 2207 ret = copy_regset_from_user(child, 2208 &user_aarch32_view, 2209 REGSET_COMPAT_GPR, 2210 0, sizeof(compat_elf_gregset_t), 2211 datap); 2212 break; 2213 2214 case COMPAT_PTRACE_GET_THREAD_AREA: 2215 ret = put_user((compat_ulong_t)child->thread.uw.tp_value, 2216 (compat_ulong_t __user *)datap); 2217 break; 2218 2219 case COMPAT_PTRACE_SET_SYSCALL: 2220 task_pt_regs(child)->syscallno = data; 2221 ret = 0; 2222 break; 2223 2224 case COMPAT_PTRACE_GETVFPREGS: 2225 ret = copy_regset_to_user(child, 2226 &user_aarch32_view, 2227 REGSET_COMPAT_VFP, 2228 0, VFP_STATE_SIZE, 2229 datap); 2230 break; 2231 2232 case COMPAT_PTRACE_SETVFPREGS: 2233 ret = copy_regset_from_user(child, 2234 &user_aarch32_view, 2235 REGSET_COMPAT_VFP, 2236 0, VFP_STATE_SIZE, 2237 datap); 2238 break; 2239 2240 #ifdef CONFIG_HAVE_HW_BREAKPOINT 2241 case COMPAT_PTRACE_GETHBPREGS: 2242 ret = compat_ptrace_gethbpregs(child, addr, datap); 2243 break; 2244 2245 case COMPAT_PTRACE_SETHBPREGS: 2246 ret = compat_ptrace_sethbpregs(child, addr, datap); 2247 break; 2248 #endif 2249 2250 default: 2251 ret = compat_ptrace_request(child, request, addr, 2252 data); 2253 break; 2254 } 2255 2256 return ret; 2257 } 2258 #endif /* CONFIG_COMPAT */ 2259 2260 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 2261 { 2262 /* 2263 * Core dumping of 32-bit tasks or compat ptrace requests must use the 2264 * user_aarch32_view compatible with arm32. Native ptrace requests on 2265 * 32-bit children use an extended user_aarch32_ptrace_view to allow 2266 * access to the TLS register. 2267 */ 2268 if (is_compat_task()) 2269 return &user_aarch32_view; 2270 else if (is_compat_thread(task_thread_info(task))) 2271 return &user_aarch32_ptrace_view; 2272 2273 return &user_aarch64_view; 2274 } 2275 2276 long arch_ptrace(struct task_struct *child, long request, 2277 unsigned long addr, unsigned long data) 2278 { 2279 switch (request) { 2280 case PTRACE_PEEKMTETAGS: 2281 case PTRACE_POKEMTETAGS: 2282 return mte_ptrace_copy_tags(child, request, addr, data); 2283 } 2284 2285 return ptrace_request(child, request, addr, data); 2286 } 2287 2288 enum ptrace_syscall_dir { 2289 PTRACE_SYSCALL_ENTER = 0, 2290 PTRACE_SYSCALL_EXIT, 2291 }; 2292 2293 static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) 2294 { 2295 int regno; 2296 unsigned long saved_reg; 2297 2298 /* 2299 * We have some ABI weirdness here in the way that we handle syscall 2300 * exit stops because we indicate whether or not the stop has been 2301 * signalled from syscall entry or syscall exit by clobbering a general 2302 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee 2303 * and restoring its old value after the stop. This means that: 2304 * 2305 * - Any writes by the tracer to this register during the stop are 2306 * ignored/discarded. 2307 * 2308 * - The actual value of the register is not available during the stop, 2309 * so the tracer cannot save it and restore it later. 2310 * 2311 * - Syscall stops behave differently to seccomp and pseudo-step traps 2312 * (the latter do not nobble any registers). 2313 */ 2314 regno = (is_compat_task() ? 12 : 7); 2315 saved_reg = regs->regs[regno]; 2316 regs->regs[regno] = dir; 2317 2318 if (dir == PTRACE_SYSCALL_ENTER) { 2319 if (ptrace_report_syscall_entry(regs)) 2320 forget_syscall(regs); 2321 regs->regs[regno] = saved_reg; 2322 } else if (!test_thread_flag(TIF_SINGLESTEP)) { 2323 ptrace_report_syscall_exit(regs, 0); 2324 regs->regs[regno] = saved_reg; 2325 } else { 2326 regs->regs[regno] = saved_reg; 2327 2328 /* 2329 * Signal a pseudo-step exception since we are stepping but 2330 * tracer modifications to the registers may have rewound the 2331 * state machine. 2332 */ 2333 ptrace_report_syscall_exit(regs, 1); 2334 } 2335 } 2336 2337 int syscall_trace_enter(struct pt_regs *regs) 2338 { 2339 unsigned long flags = read_thread_flags(); 2340 2341 if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { 2342 report_syscall(regs, PTRACE_SYSCALL_ENTER); 2343 if (flags & _TIF_SYSCALL_EMU) 2344 return NO_SYSCALL; 2345 } 2346 2347 /* Do the secure computing after ptrace; failures should be fast. */ 2348 if (secure_computing() == -1) 2349 return NO_SYSCALL; 2350 2351 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 2352 trace_sys_enter(regs, regs->syscallno); 2353 2354 audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1], 2355 regs->regs[2], regs->regs[3]); 2356 2357 return regs->syscallno; 2358 } 2359 2360 void syscall_trace_exit(struct pt_regs *regs) 2361 { 2362 unsigned long flags = read_thread_flags(); 2363 2364 audit_syscall_exit(regs); 2365 2366 if (flags & _TIF_SYSCALL_TRACEPOINT) 2367 trace_sys_exit(regs, syscall_get_return_value(current, regs)); 2368 2369 if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) 2370 report_syscall(regs, PTRACE_SYSCALL_EXIT); 2371 2372 rseq_syscall(regs); 2373 } 2374 2375 /* 2376 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. 2377 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is 2378 * not described in ARM DDI 0487D.a. 2379 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may 2380 * be allocated an EL0 meaning in future. 2381 * Userspace cannot use these until they have an architectural meaning. 2382 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. 2383 * We also reserve IL for the kernel; SS is handled dynamically. 2384 */ 2385 #define SPSR_EL1_AARCH64_RES0_BITS \ 2386 (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ 2387 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) 2388 #define SPSR_EL1_AARCH32_RES0_BITS \ 2389 (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) 2390 2391 static int valid_compat_regs(struct user_pt_regs *regs) 2392 { 2393 regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; 2394 2395 if (!system_supports_mixed_endian_el0()) { 2396 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 2397 regs->pstate |= PSR_AA32_E_BIT; 2398 else 2399 regs->pstate &= ~PSR_AA32_E_BIT; 2400 } 2401 2402 if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && 2403 (regs->pstate & PSR_AA32_A_BIT) == 0 && 2404 (regs->pstate & PSR_AA32_I_BIT) == 0 && 2405 (regs->pstate & PSR_AA32_F_BIT) == 0) { 2406 return 1; 2407 } 2408 2409 /* 2410 * Force PSR to a valid 32-bit EL0t, preserving the same bits as 2411 * arch/arm. 2412 */ 2413 regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | 2414 PSR_AA32_C_BIT | PSR_AA32_V_BIT | 2415 PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | 2416 PSR_AA32_GE_MASK | PSR_AA32_E_BIT | 2417 PSR_AA32_T_BIT; 2418 regs->pstate |= PSR_MODE32_BIT; 2419 2420 return 0; 2421 } 2422 2423 static int valid_native_regs(struct user_pt_regs *regs) 2424 { 2425 regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; 2426 2427 if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && 2428 (regs->pstate & PSR_D_BIT) == 0 && 2429 (regs->pstate & PSR_A_BIT) == 0 && 2430 (regs->pstate & PSR_I_BIT) == 0 && 2431 (regs->pstate & PSR_F_BIT) == 0) { 2432 return 1; 2433 } 2434 2435 /* Force PSR to a valid 64-bit EL0t */ 2436 regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; 2437 2438 return 0; 2439 } 2440 2441 /* 2442 * Are the current registers suitable for user mode? (used to maintain 2443 * security in signal handlers) 2444 */ 2445 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) 2446 { 2447 /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ 2448 user_regs_reset_single_step(regs, task); 2449 2450 if (is_compat_thread(task_thread_info(task))) 2451 return valid_compat_regs(regs); 2452 else 2453 return valid_native_regs(regs); 2454 } 2455