/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
}

/*
 * Handle hitting a breakpoint.
 */
static int ptrace_break(struct pt_regs *regs)
{
        siginfo_t info = {
                .si_signo = SIGTRAP,
                .si_errno = 0,
                .si_code  = TRAP_BRKPT,
                .si_addr  = (void __user *)instruction_pointer(regs),
        };

        force_sig_info(SIGTRAP, &info, current);
        return 0;
}

static int arm64_break_trap(unsigned long addr, unsigned int esr,
                            struct pt_regs *regs)
{
        return ptrace_break(regs);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
        struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
        siginfo_t info = {
                .si_signo = SIGTRAP,
                .si_errno = 0,
                .si_code  = TRAP_HWBKPT,
                .si_addr  = (void __user *)(bkpt->trigger),
        };

#ifdef CONFIG_COMPAT
        int i;

        if (!is_compat_task())
                goto send_sig;

        for (i = 0; i < ARM_MAX_BRP; ++i) {
                if (current->thread.debug.hbp_break[i] == bp) {
                        info.si_errno = (i << 1) + 1;
                        break;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
                }
        }

send_sig:
#endif
        force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < ARM_MAX_BRP; i++) {
                if (t->debug.hbp_break[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_break[i]);
                        t->debug.hbp_break[i] = NULL;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; i++) {
                if (t->debug.hbp_watch[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_watch[i]);
                        t->debug.hbp_watch[i] = NULL;
                }
        }
}

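/*
 * Reset the debug state of a newly copied thread so that hardware
 * breakpoints and watchpoints are not inherited from the parent.
 */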
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
                                               struct task_struct *tsk,
                                               unsigned long idx)
{
        struct perf_event *bp = ERR_PTR(-EINVAL);

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP)
                        bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP)
                        bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }

        return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
                                struct task_struct *tsk,
                                unsigned long idx,
                                struct perf_event *bp)
{
        int err = -EINVAL;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP) {
                        tsk->thread.debug.hbp_break[idx] = bp;
                        err = 0;
                }
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP) {
                        tsk->thread.debug.hbp_watch[idx] = bp;
                        err = 0;
                }
                break;
        }

        return err;
}

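/*
 * Lazily allocate a hardware breakpoint for the given slot.  The event is
 * created disabled, with defaults that pass validation, and is stashed in
 * the thread's debug state; the debugger arms it later by writing an
 * address and control value.
 */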
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
                                            struct task_struct *tsk,
                                            unsigned long idx)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        int err, type;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                type = HW_BREAKPOINT_X;
                break;
        case NT_ARM_HW_WATCH:
                type = HW_BREAKPOINT_RW;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        ptrace_breakpoint_init(&attr);

        /*
         * Initialise fields to sane defaults
         * (i.e. values that will pass validation).
         */
        attr.bp_addr = 0;
        attr.bp_len = HW_BREAKPOINT_LEN_4;
        attr.bp_type = type;
        attr.disabled = 1;

        bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
        if (IS_ERR(bp))
                return bp;

        err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
        if (err)
                return ERR_PTR(err);

        return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                                     struct arch_hw_breakpoint_ctrl ctrl,
                                     struct perf_event_attr *attr)
{
        int err, len, type;

        err = arch_bp_generic_fields(ctrl, &len, &type);
        if (err)
                return err;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
                break;
        case NT_ARM_HW_WATCH:
                if ((type & HW_BREAKPOINT_RW) != type)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        attr->bp_len = len;
        attr->bp_type = type;
        attr->disabled = !ctrl.enabled;

        return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
        u8 num;
        u32 reg = 0;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                num = hw_breakpoint_slots(TYPE_INST);
                break;
        case NT_ARM_HW_WATCH:
                num = hw_breakpoint_slots(TYPE_DATA);
                break;
        default:
                return -EINVAL;
        }

        reg |= debug_monitors_arch();
        reg <<= 8;
        reg |= num;

        *info = reg;
        return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 *ctrl)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
        return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 *addr)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *addr = bp ? bp->attr.bp_addr : 0;
        return 0;
}

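/*
 * Return the perf event backing the requested slot, creating a default
 * (disabled) event on first use.
 */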
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
                                                        struct task_struct *tsk,
                                                        unsigned long idx)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (!bp)
                bp = ptrace_hbp_create(note_type, tsk, idx);

        return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 uctrl)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint_ctrl ctrl;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        decode_ctrl_reg(uctrl, &ctrl);
        err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
        if (err)
                return err;

        return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 addr)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        attr.bp_addr = addr;
        err = modify_user_hw_breakpoint(bp, &attr);
        return err;
}

#define PTRACE_HBP_ADDR_SZ      sizeof(u64)
#define PTRACE_HBP_CTRL_SZ      sizeof(u32)
#define PTRACE_HBP_REG_OFF      sizeof(u32)

static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
        u32 info, ctrl;
        u64 addr;

        /* Resource info */
        ret = ptrace_hbp_get_resource_info(note_type, &info);
        if (ret)
                return ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
                                          offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
                                          offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
                idx++;
        }

        return 0;
}

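/*
 * hw_break_set() accepts the same layout that hw_break_get() produces: the
 * leading 32-bit resource-info word is read-only and therefore ignored,
 * after which each (address, control) pair is written back to the
 * corresponding breakpoint or watchpoint slot.
 */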
static int hw_break_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
        u32 ctrl;
        u64 addr;

        /* Resource info */
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
                idx++;
        }

        return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_pt_regs newregs;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
                return ret;

        if (!valid_user_regs(&newregs))
                return -EINVAL;

        task_pt_regs(target)->user_regs = newregs;
        return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        uregs = &target->thread.fpsimd_state.user_fpsimd;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_fpsimd_state newstate;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        target->thread.fpsimd_state.user_fpsimd = newstate;
        return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        unsigned long *tls = &target->thread.tp_value;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        unsigned long tls;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.tp_value = tls;
        return ret;
}

enum aarch64_regset {
        REGSET_GPR,
        REGSET_FPR,
        REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        REGSET_HW_BREAK,
        REGSET_HW_WATCH,
#endif
};

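/*
 * Native (AArch64) regsets.  These are exposed to debuggers via
 * PTRACE_GETREGSET/PTRACE_SETREGSET and, through the regset core, as the
 * corresponding NT_* notes in ELF core dumps.
 */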
static const struct user_regset aarch64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_pt_regs) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .get = gpr_get,
                .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
                /*
                 * We pretend we have 32-bit registers because the fpsr and
                 * fpcr are 32 bits wide.
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = fpr_get,
                .set = fpr_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(void *),
                .align = sizeof(void *),
                .get = tls_get,
                .set = tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
#endif
};

static const struct user_regset_view user_aarch64_view = {
        .name = "aarch64", .e_machine = EM_AARCH64,
        .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
        REGSET_COMPAT_GPR,
        REGSET_COMPAT_VFP,
};

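/*
 * AArch32 GPR regset: indices 0-14 map directly onto regs[0-14]
 * (r0-r12, sp, lr), index 15 is the PC, index 16 the CPSR (pstate) and
 * index 17 the saved original r0 (orig_x0).
 */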
static int compat_gpr_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                void *reg;

                switch (idx) {
                case 15:
                        reg = (void *)&task_pt_regs(target)->pc;
                        break;
                case 16:
                        reg = (void *)&task_pt_regs(target)->pstate;
                        break;
                case 17:
                        reg = (void *)&task_pt_regs(target)->orig_x0;
                        break;
                default:
                        reg = (void *)&task_pt_regs(target)->regs[idx];
                }

                ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));

                if (ret)
                        break;
                else
                        ubuf += sizeof(compat_ulong_t);
        }

        return ret;
}

static int compat_gpr_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        newregs = *task_pt_regs(target);

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                void *reg;

                switch (idx) {
                case 15:
                        reg = (void *)&newregs.pc;
                        break;
                case 16:
                        reg = (void *)&newregs.pstate;
                        break;
                case 17:
                        reg = (void *)&newregs.orig_x0;
                        break;
                default:
                        reg = (void *)&newregs.regs[idx];
                }

                ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));

                if (ret)
                        goto out;
                else
                        ubuf += sizeof(compat_ulong_t);
        }

        if (valid_user_regs(&newregs.user_regs))
                *task_pt_regs(target) = newregs;
        else
                ret = -EINVAL;

out:
        return ret;
}

static int compat_vfp_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        /*
         * The VFP registers are packed into the fpsimd_state, so they all sit
         * nicely together for us. We just need to create the fpscr separately.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                  VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
                        (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
                ret = put_user(fpscr, (compat_ulong_t __user *)ubuf);
        }

        return ret;
}

static int compat_vfp_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        if (pos + count > VFP_STATE_SIZE)
                return -EIO;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                 VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                ret = get_user(fpscr, (compat_ulong_t __user *)ubuf);
                uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
                uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
        }

        return ret;
}

static const struct user_regset aarch32_regsets[] = {
        [REGSET_COMPAT_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_COMPAT_VFP] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
};

static const struct user_regset_view user_aarch32_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

int aarch32_break_trap(struct pt_regs *regs)
{
        unsigned int instr;
        bool bp = false;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (compat_thumb_mode(regs)) {
                /* get 16-bit Thumb instruction */
                get_user(instr, (u16 __user *)pc);
                if (instr == AARCH32_BREAK_THUMB2_LO) {
                        /* get second half of 32-bit Thumb-2 instruction */
                        get_user(instr, (u16 __user *)(pc + 2));
                        bp = instr == AARCH32_BREAK_THUMB2_HI;
                } else {
                        bp = instr == AARCH32_BREAK_THUMB;
                }
        } else {
                /* 32-bit ARM instruction */
                get_user(instr, (u32 __user *)pc);
                bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
        }

        if (bp)
                return ptrace_break(regs);
        return 1;
}

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
                                   compat_ulong_t __user *ret)
{
        compat_ulong_t tmp;

        if (off & 3)
                return -EIO;

        if (off == PT_TEXT_ADDR)
                tmp = tsk->mm->start_code;
        else if (off == PT_DATA_ADDR)
                tmp = tsk->mm->start_data;
        else if (off == PT_TEXT_END_ADDR)
                tmp = tsk->mm->end_code;
        else if (off < sizeof(compat_elf_gregset_t))
                return copy_regset_to_user(tsk, &user_aarch32_view,
                                           REGSET_COMPAT_GPR, off,
                                           sizeof(compat_ulong_t), ret);
        else if (off >= COMPAT_USER_SZ)
                return -EIO;
        else
                tmp = 0;

        return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
{
        int ret;

        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;

        if (off >= sizeof(compat_elf_gregset_t))
                return 0;

        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
        return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_struct
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
        return (abs(num) - 1) >> 1;
}

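/*
 * Using the numbering described above: num 1/2 address the first
 * breakpoint's address/control registers (idx 0), num 3/4 the second
 * breakpoint's (idx 1), and num -1/-2 the first watchpoint's.  num 0 is
 * the read-only resource-info register handled in
 * compat_ptrace_gethbpregs().
 */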
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
        u8 num_brps, num_wrps, debug_arch, wp_len;
        u32 reg = 0;

        num_brps = hw_breakpoint_slots(TYPE_INST);
        num_wrps = hw_breakpoint_slots(TYPE_DATA);

        debug_arch = debug_monitors_arch();
        wp_len = 8;
        reg |= debug_arch;
        reg <<= 8;
        reg |= wp_len;
        reg <<= 8;
        reg |= num_wrps;
        reg <<= 8;
        reg |= num_brps;

        *kdata = reg;
        return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr = 0;
        u32 ctrl = 0;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
                *kdata = (u32)addr;
        } else {
                err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
                *kdata = ctrl;
        }

        return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr;
        u32 ctrl;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                addr = *kdata;
                err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
        } else {
                ctrl = *kdata;
                err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
        }

        return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
        /* Resource info */
        } else if (num == 0) {
                ret = compat_ptrace_hbp_get_resource_info(&kdata);
        /* Breakpoint */
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }
        set_fs(old_fs);

        if (!ret)
                ret = put_user(kdata, data);

        return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata = 0;
        mm_segment_t old_fs = get_fs();

        if (num == 0)
                return 0;

        ret = get_user(kdata, data);
        if (ret)
                return ret;

        set_fs(KERNEL_DS);
        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
        set_fs(old_fs);

        return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

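/*
 * Handle the AArch32-specific ptrace requests issued by a compat tracer;
 * anything not recognised here is passed on to compat_ptrace_request().
 */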
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = compat_ptrace_read_user(child, addr, datap);
                break;

        case PTRACE_POKEUSR:
                ret = compat_ptrace_write_user(child, addr, data);
                break;

        case COMPAT_PTRACE_GETREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_GPR,
                                          0, sizeof(compat_elf_gregset_t),
                                          datap);
                break;

        case COMPAT_PTRACE_SETREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_GPR,
                                            0, sizeof(compat_elf_gregset_t),
                                            datap);
                break;

        case COMPAT_PTRACE_GET_THREAD_AREA:
                ret = put_user((compat_ulong_t)child->thread.tp_value,
                               (compat_ulong_t __user *)datap);
                break;

        case COMPAT_PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;

        case COMPAT_PTRACE_GETVFPREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_VFP,
                                          0, VFP_STATE_SIZE,
                                          datap);
                break;

        case COMPAT_PTRACE_SETVFPREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_VFP,
                                            0, VFP_STATE_SIZE,
                                            datap);
                break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        case COMPAT_PTRACE_GETHBPREGS:
                ret = compat_ptrace_gethbpregs(child, addr, datap);
                break;

        case COMPAT_PTRACE_SETHBPREGS:
                ret = compat_ptrace_sethbpregs(child, addr, datap);
                break;
#endif

        default:
                ret = compat_ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (is_compat_thread(task_thread_info(task)))
                return &user_aarch32_view;
#endif
        return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        return ptrace_request(child, request, addr, data);
}

static int __init ptrace_break_init(void)
{
        hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
                              TRAP_BRKPT, "ptrace BRK handler");
        return 0;
}
core_initcall(ptrace_break_init);

asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
        unsigned long saved_reg;

        if (!test_thread_flag(TIF_SYSCALL_TRACE))
                return regs->syscallno;

        if (is_compat_task()) {
                /* AArch32 uses ip (r12) for scratch */
                saved_reg = regs->regs[12];
                regs->regs[12] = dir;
        } else {
                /*
                 * Save X7. X7 is used to denote syscall entry/exit:
                 * X7 = 0 -> entry, = 1 -> exit
                 */
                saved_reg = regs->regs[7];
                regs->regs[7] = dir;
        }

        if (dir)
                tracehook_report_syscall_exit(regs, 0);
        else if (tracehook_report_syscall_entry(regs))
                regs->syscallno = ~0UL;

        if (is_compat_task())
                regs->regs[12] = saved_reg;
        else
                regs->regs[7] = saved_reg;

        return regs->syscallno;
}