1 /* 2 * linux/arch/arm/kernel/ptrace.c 3 * 4 * By Ross Biro 1/23/92 5 * edited by Linus Torvalds 6 * ARM modifications Copyright (C) 2000 Russell King 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 #include <linux/kernel.h> 13 #include <linux/sched.h> 14 #include <linux/mm.h> 15 #include <linux/elf.h> 16 #include <linux/smp.h> 17 #include <linux/ptrace.h> 18 #include <linux/user.h> 19 #include <linux/security.h> 20 #include <linux/init.h> 21 #include <linux/signal.h> 22 #include <linux/uaccess.h> 23 #include <linux/perf_event.h> 24 #include <linux/hw_breakpoint.h> 25 #include <linux/regset.h> 26 27 #include <asm/pgtable.h> 28 #include <asm/system.h> 29 #include <asm/traps.h> 30 31 #define REG_PC 15 32 #define REG_PSR 16 33 /* 34 * does not yet catch signals sent when the child dies. 35 * in exit.c or in signal.c. 36 */ 37 38 #if 0 39 /* 40 * Breakpoint SWI instruction: SWI &9F0001 41 */ 42 #define BREAKINST_ARM 0xef9f0001 43 #define BREAKINST_THUMB 0xdf00 /* fill this in later */ 44 #else 45 /* 46 * New breakpoints - use an undefined instruction. The ARM architecture 47 * reference manual guarantees that the following instruction space 48 * will produce an undefined instruction exception on all CPUs: 49 * 50 * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx 51 * Thumb: 1101 1110 xxxx xxxx 52 */ 53 #define BREAKINST_ARM 0xe7f001f0 54 #define BREAKINST_THUMB 0xde01 55 #endif 56 57 struct pt_regs_offset { 58 const char *name; 59 int offset; 60 }; 61 62 #define REG_OFFSET_NAME(r) \ 63 {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} 64 #define REG_OFFSET_END {.name = NULL, .offset = 0} 65 66 static const struct pt_regs_offset regoffset_table[] = { 67 REG_OFFSET_NAME(r0), 68 REG_OFFSET_NAME(r1), 69 REG_OFFSET_NAME(r2), 70 REG_OFFSET_NAME(r3), 71 REG_OFFSET_NAME(r4), 72 REG_OFFSET_NAME(r5), 73 REG_OFFSET_NAME(r6), 74 REG_OFFSET_NAME(r7), 75 REG_OFFSET_NAME(r8), 76 REG_OFFSET_NAME(r9), 77 REG_OFFSET_NAME(r10), 78 REG_OFFSET_NAME(fp), 79 REG_OFFSET_NAME(ip), 80 REG_OFFSET_NAME(sp), 81 REG_OFFSET_NAME(lr), 82 REG_OFFSET_NAME(pc), 83 REG_OFFSET_NAME(cpsr), 84 REG_OFFSET_NAME(ORIG_r0), 85 REG_OFFSET_END, 86 }; 87 88 /** 89 * regs_query_register_offset() - query register offset from its name 90 * @name: the name of a register 91 * 92 * regs_query_register_offset() returns the offset of a register in struct 93 * pt_regs from its name. If the name is invalid, this returns -EINVAL; 94 */ 95 int regs_query_register_offset(const char *name) 96 { 97 const struct pt_regs_offset *roff; 98 for (roff = regoffset_table; roff->name != NULL; roff++) 99 if (!strcmp(roff->name, name)) 100 return roff->offset; 101 return -EINVAL; 102 } 103 104 /** 105 * regs_query_register_name() - query register name from its offset 106 * @offset: the offset of a register in struct pt_regs. 107 * 108 * regs_query_register_name() returns the name of a register from its 109 * offset in struct pt_regs. If the @offset is invalid, this returns NULL; 110 */ 111 const char *regs_query_register_name(unsigned int offset) 112 { 113 const struct pt_regs_offset *roff; 114 for (roff = regoffset_table; roff->name != NULL; roff++) 115 if (roff->offset == offset) 116 return roff->name; 117 return NULL; 118 } 119 120 /** 121 * regs_within_kernel_stack() - check the address in the stack 122 * @regs: pt_regs which contains kernel stack pointer. 123 * @addr: address which is checked. 124 * 125 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 126 * If @addr is within the kernel stack, it returns true. If not, returns false. 127 */ 128 bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) 129 { 130 return ((addr & ~(THREAD_SIZE - 1)) == 131 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); 132 } 133 134 /** 135 * regs_get_kernel_stack_nth() - get Nth entry of the stack 136 * @regs: pt_regs which contains kernel stack pointer. 137 * @n: stack entry number. 138 * 139 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which 140 * is specified by @regs. If the @n th entry is NOT in the kernel stack, 141 * this returns 0. 142 */ 143 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) 144 { 145 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); 146 addr += n; 147 if (regs_within_kernel_stack(regs, (unsigned long)addr)) 148 return *addr; 149 else 150 return 0; 151 } 152 153 /* 154 * this routine will get a word off of the processes privileged stack. 155 * the offset is how far from the base addr as stored in the THREAD. 156 * this routine assumes that all the privileged stacks are in our 157 * data space. 158 */ 159 static inline long get_user_reg(struct task_struct *task, int offset) 160 { 161 return task_pt_regs(task)->uregs[offset]; 162 } 163 164 /* 165 * this routine will put a word on the processes privileged stack. 166 * the offset is how far from the base addr as stored in the THREAD. 167 * this routine assumes that all the privileged stacks are in our 168 * data space. 169 */ 170 static inline int 171 put_user_reg(struct task_struct *task, int offset, long data) 172 { 173 struct pt_regs newregs, *regs = task_pt_regs(task); 174 int ret = -EINVAL; 175 176 newregs = *regs; 177 newregs.uregs[offset] = data; 178 179 if (valid_user_regs(&newregs)) { 180 regs->uregs[offset] = data; 181 ret = 0; 182 } 183 184 return ret; 185 } 186 187 /* 188 * Called by kernel/ptrace.c when detaching.. 189 */ 190 void ptrace_disable(struct task_struct *child) 191 { 192 /* Nothing to do. */ 193 } 194 195 /* 196 * Handle hitting a breakpoint. 197 */ 198 void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) 199 { 200 siginfo_t info; 201 202 info.si_signo = SIGTRAP; 203 info.si_errno = 0; 204 info.si_code = TRAP_BRKPT; 205 info.si_addr = (void __user *)instruction_pointer(regs); 206 207 force_sig_info(SIGTRAP, &info, tsk); 208 } 209 210 static int break_trap(struct pt_regs *regs, unsigned int instr) 211 { 212 ptrace_break(current, regs); 213 return 0; 214 } 215 216 static struct undef_hook arm_break_hook = { 217 .instr_mask = 0x0fffffff, 218 .instr_val = 0x07f001f0, 219 .cpsr_mask = PSR_T_BIT, 220 .cpsr_val = 0, 221 .fn = break_trap, 222 }; 223 224 static struct undef_hook thumb_break_hook = { 225 .instr_mask = 0xffff, 226 .instr_val = 0xde01, 227 .cpsr_mask = PSR_T_BIT, 228 .cpsr_val = PSR_T_BIT, 229 .fn = break_trap, 230 }; 231 232 static struct undef_hook thumb2_break_hook = { 233 .instr_mask = 0xffffffff, 234 .instr_val = 0xf7f0a000, 235 .cpsr_mask = PSR_T_BIT, 236 .cpsr_val = PSR_T_BIT, 237 .fn = break_trap, 238 }; 239 240 static int __init ptrace_break_init(void) 241 { 242 register_undef_hook(&arm_break_hook); 243 register_undef_hook(&thumb_break_hook); 244 register_undef_hook(&thumb2_break_hook); 245 return 0; 246 } 247 248 core_initcall(ptrace_break_init); 249 250 /* 251 * Read the word at offset "off" into the "struct user". We 252 * actually access the pt_regs stored on the kernel stack. 253 */ 254 static int ptrace_read_user(struct task_struct *tsk, unsigned long off, 255 unsigned long __user *ret) 256 { 257 unsigned long tmp; 258 259 if (off & 3 || off >= sizeof(struct user)) 260 return -EIO; 261 262 tmp = 0; 263 if (off == PT_TEXT_ADDR) 264 tmp = tsk->mm->start_code; 265 else if (off == PT_DATA_ADDR) 266 tmp = tsk->mm->start_data; 267 else if (off == PT_TEXT_END_ADDR) 268 tmp = tsk->mm->end_code; 269 else if (off < sizeof(struct pt_regs)) 270 tmp = get_user_reg(tsk, off >> 2); 271 272 return put_user(tmp, ret); 273 } 274 275 /* 276 * Write the word at offset "off" into "struct user". We 277 * actually access the pt_regs stored on the kernel stack. 278 */ 279 static int ptrace_write_user(struct task_struct *tsk, unsigned long off, 280 unsigned long val) 281 { 282 if (off & 3 || off >= sizeof(struct user)) 283 return -EIO; 284 285 if (off >= sizeof(struct pt_regs)) 286 return 0; 287 288 return put_user_reg(tsk, off >> 2, val); 289 } 290 291 #ifdef CONFIG_IWMMXT 292 293 /* 294 * Get the child iWMMXt state. 295 */ 296 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) 297 { 298 struct thread_info *thread = task_thread_info(tsk); 299 300 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) 301 return -ENODATA; 302 iwmmxt_task_disable(thread); /* force it to ram */ 303 return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE) 304 ? -EFAULT : 0; 305 } 306 307 /* 308 * Set the child iWMMXt state. 309 */ 310 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) 311 { 312 struct thread_info *thread = task_thread_info(tsk); 313 314 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) 315 return -EACCES; 316 iwmmxt_task_release(thread); /* force a reload */ 317 return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE) 318 ? -EFAULT : 0; 319 } 320 321 #endif 322 323 #ifdef CONFIG_CRUNCH 324 /* 325 * Get the child Crunch state. 326 */ 327 static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) 328 { 329 struct thread_info *thread = task_thread_info(tsk); 330 331 crunch_task_disable(thread); /* force it to ram */ 332 return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) 333 ? -EFAULT : 0; 334 } 335 336 /* 337 * Set the child Crunch state. 338 */ 339 static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) 340 { 341 struct thread_info *thread = task_thread_info(tsk); 342 343 crunch_task_release(thread); /* force a reload */ 344 return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) 345 ? -EFAULT : 0; 346 } 347 #endif 348 349 #ifdef CONFIG_HAVE_HW_BREAKPOINT 350 /* 351 * Convert a virtual register number into an index for a thread_info 352 * breakpoint array. Breakpoints are identified using positive numbers 353 * whilst watchpoints are negative. The registers are laid out as pairs 354 * of (address, control), each pair mapping to a unique hw_breakpoint struct. 355 * Register 0 is reserved for describing resource information. 356 */ 357 static int ptrace_hbp_num_to_idx(long num) 358 { 359 if (num < 0) 360 num = (ARM_MAX_BRP << 1) - num; 361 return (num - 1) >> 1; 362 } 363 364 /* 365 * Returns the virtual register number for the address of the 366 * breakpoint at index idx. 367 */ 368 static long ptrace_hbp_idx_to_num(int idx) 369 { 370 long mid = ARM_MAX_BRP << 1; 371 long num = (idx << 1) + 1; 372 return num > mid ? mid - num : num; 373 } 374 375 /* 376 * Handle hitting a HW-breakpoint. 377 */ 378 static void ptrace_hbptriggered(struct perf_event *bp, 379 struct perf_sample_data *data, 380 struct pt_regs *regs) 381 { 382 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); 383 long num; 384 int i; 385 siginfo_t info; 386 387 for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) 388 if (current->thread.debug.hbp[i] == bp) 389 break; 390 391 num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); 392 393 info.si_signo = SIGTRAP; 394 info.si_errno = (int)num; 395 info.si_code = TRAP_HWBKPT; 396 info.si_addr = (void __user *)(bkpt->trigger); 397 398 force_sig_info(SIGTRAP, &info, current); 399 } 400 401 /* 402 * Set ptrace breakpoint pointers to zero for this task. 403 * This is required in order to prevent child processes from unregistering 404 * breakpoints held by their parent. 405 */ 406 void clear_ptrace_hw_breakpoint(struct task_struct *tsk) 407 { 408 memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); 409 } 410 411 /* 412 * Unregister breakpoints from this task and reset the pointers in 413 * the thread_struct. 414 */ 415 void flush_ptrace_hw_breakpoint(struct task_struct *tsk) 416 { 417 int i; 418 struct thread_struct *t = &tsk->thread; 419 420 for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) { 421 if (t->debug.hbp[i]) { 422 unregister_hw_breakpoint(t->debug.hbp[i]); 423 t->debug.hbp[i] = NULL; 424 } 425 } 426 } 427 428 static u32 ptrace_get_hbp_resource_info(void) 429 { 430 u8 num_brps, num_wrps, debug_arch, wp_len; 431 u32 reg = 0; 432 433 num_brps = hw_breakpoint_slots(TYPE_INST); 434 num_wrps = hw_breakpoint_slots(TYPE_DATA); 435 debug_arch = arch_get_debug_arch(); 436 wp_len = arch_get_max_wp_len(); 437 438 reg |= debug_arch; 439 reg <<= 8; 440 reg |= wp_len; 441 reg <<= 8; 442 reg |= num_wrps; 443 reg <<= 8; 444 reg |= num_brps; 445 446 return reg; 447 } 448 449 static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) 450 { 451 struct perf_event_attr attr; 452 453 ptrace_breakpoint_init(&attr); 454 455 /* Initialise fields to sane defaults. */ 456 attr.bp_addr = 0; 457 attr.bp_len = HW_BREAKPOINT_LEN_4; 458 attr.bp_type = type; 459 attr.disabled = 1; 460 461 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, 462 tsk); 463 } 464 465 static int ptrace_gethbpregs(struct task_struct *tsk, long num, 466 unsigned long __user *data) 467 { 468 u32 reg; 469 int idx, ret = 0; 470 struct perf_event *bp; 471 struct arch_hw_breakpoint_ctrl arch_ctrl; 472 473 if (num == 0) { 474 reg = ptrace_get_hbp_resource_info(); 475 } else { 476 idx = ptrace_hbp_num_to_idx(num); 477 if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { 478 ret = -EINVAL; 479 goto out; 480 } 481 482 bp = tsk->thread.debug.hbp[idx]; 483 if (!bp) { 484 reg = 0; 485 goto put; 486 } 487 488 arch_ctrl = counter_arch_bp(bp)->ctrl; 489 490 /* 491 * Fix up the len because we may have adjusted it 492 * to compensate for an unaligned address. 493 */ 494 while (!(arch_ctrl.len & 0x1)) 495 arch_ctrl.len >>= 1; 496 497 if (num & 0x1) 498 reg = bp->attr.bp_addr; 499 else 500 reg = encode_ctrl_reg(arch_ctrl); 501 } 502 503 put: 504 if (put_user(reg, data)) 505 ret = -EFAULT; 506 507 out: 508 return ret; 509 } 510 511 static int ptrace_sethbpregs(struct task_struct *tsk, long num, 512 unsigned long __user *data) 513 { 514 int idx, gen_len, gen_type, implied_type, ret = 0; 515 u32 user_val; 516 struct perf_event *bp; 517 struct arch_hw_breakpoint_ctrl ctrl; 518 struct perf_event_attr attr; 519 520 if (num == 0) 521 goto out; 522 else if (num < 0) 523 implied_type = HW_BREAKPOINT_RW; 524 else 525 implied_type = HW_BREAKPOINT_X; 526 527 idx = ptrace_hbp_num_to_idx(num); 528 if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { 529 ret = -EINVAL; 530 goto out; 531 } 532 533 if (get_user(user_val, data)) { 534 ret = -EFAULT; 535 goto out; 536 } 537 538 bp = tsk->thread.debug.hbp[idx]; 539 if (!bp) { 540 bp = ptrace_hbp_create(tsk, implied_type); 541 if (IS_ERR(bp)) { 542 ret = PTR_ERR(bp); 543 goto out; 544 } 545 tsk->thread.debug.hbp[idx] = bp; 546 } 547 548 attr = bp->attr; 549 550 if (num & 0x1) { 551 /* Address */ 552 attr.bp_addr = user_val; 553 } else { 554 /* Control */ 555 decode_ctrl_reg(user_val, &ctrl); 556 ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); 557 if (ret) 558 goto out; 559 560 if ((gen_type & implied_type) != gen_type) { 561 ret = -EINVAL; 562 goto out; 563 } 564 565 attr.bp_len = gen_len; 566 attr.bp_type = gen_type; 567 attr.disabled = !ctrl.enabled; 568 } 569 570 ret = modify_user_hw_breakpoint(bp, &attr); 571 out: 572 return ret; 573 } 574 #endif 575 576 /* regset get/set implementations */ 577 578 static int gpr_get(struct task_struct *target, 579 const struct user_regset *regset, 580 unsigned int pos, unsigned int count, 581 void *kbuf, void __user *ubuf) 582 { 583 struct pt_regs *regs = task_pt_regs(target); 584 585 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 586 regs, 587 0, sizeof(*regs)); 588 } 589 590 static int gpr_set(struct task_struct *target, 591 const struct user_regset *regset, 592 unsigned int pos, unsigned int count, 593 const void *kbuf, const void __user *ubuf) 594 { 595 int ret; 596 struct pt_regs newregs; 597 598 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 599 &newregs, 600 0, sizeof(newregs)); 601 if (ret) 602 return ret; 603 604 if (!valid_user_regs(&newregs)) 605 return -EINVAL; 606 607 *task_pt_regs(target) = newregs; 608 return 0; 609 } 610 611 static int fpa_get(struct task_struct *target, 612 const struct user_regset *regset, 613 unsigned int pos, unsigned int count, 614 void *kbuf, void __user *ubuf) 615 { 616 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 617 &task_thread_info(target)->fpstate, 618 0, sizeof(struct user_fp)); 619 } 620 621 static int fpa_set(struct task_struct *target, 622 const struct user_regset *regset, 623 unsigned int pos, unsigned int count, 624 const void *kbuf, const void __user *ubuf) 625 { 626 struct thread_info *thread = task_thread_info(target); 627 628 thread->used_cp[1] = thread->used_cp[2] = 1; 629 630 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 631 &thread->fpstate, 632 0, sizeof(struct user_fp)); 633 } 634 635 #ifdef CONFIG_VFP 636 /* 637 * VFP register get/set implementations. 638 * 639 * With respect to the kernel, struct user_fp is divided into three chunks: 640 * 16 or 32 real VFP registers (d0-d15 or d0-31) 641 * These are transferred to/from the real registers in the task's 642 * vfp_hard_struct. The number of registers depends on the kernel 643 * configuration. 644 * 645 * 16 or 0 fake VFP registers (d16-d31 or empty) 646 * i.e., the user_vfp structure has space for 32 registers even if 647 * the kernel doesn't have them all. 648 * 649 * vfp_get() reads this chunk as zero where applicable 650 * vfp_set() ignores this chunk 651 * 652 * 1 word for the FPSCR 653 * 654 * The bounds-checking logic built into user_regset_copyout and friends 655 * means that we can make a simple sequence of calls to map the relevant data 656 * to/from the specified slice of the user regset structure. 657 */ 658 static int vfp_get(struct task_struct *target, 659 const struct user_regset *regset, 660 unsigned int pos, unsigned int count, 661 void *kbuf, void __user *ubuf) 662 { 663 int ret; 664 struct thread_info *thread = task_thread_info(target); 665 struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; 666 const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); 667 const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); 668 669 vfp_sync_hwstate(thread); 670 671 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 672 &vfp->fpregs, 673 user_fpregs_offset, 674 user_fpregs_offset + sizeof(vfp->fpregs)); 675 if (ret) 676 return ret; 677 678 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 679 user_fpregs_offset + sizeof(vfp->fpregs), 680 user_fpscr_offset); 681 if (ret) 682 return ret; 683 684 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 685 &vfp->fpscr, 686 user_fpscr_offset, 687 user_fpscr_offset + sizeof(vfp->fpscr)); 688 } 689 690 /* 691 * For vfp_set() a read-modify-write is done on the VFP registers, 692 * in order to avoid writing back a half-modified set of registers on 693 * failure. 694 */ 695 static int vfp_set(struct task_struct *target, 696 const struct user_regset *regset, 697 unsigned int pos, unsigned int count, 698 const void *kbuf, const void __user *ubuf) 699 { 700 int ret; 701 struct thread_info *thread = task_thread_info(target); 702 struct vfp_hard_struct new_vfp = thread->vfpstate.hard; 703 const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); 704 const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); 705 706 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 707 &new_vfp.fpregs, 708 user_fpregs_offset, 709 user_fpregs_offset + sizeof(new_vfp.fpregs)); 710 if (ret) 711 return ret; 712 713 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 714 user_fpregs_offset + sizeof(new_vfp.fpregs), 715 user_fpscr_offset); 716 if (ret) 717 return ret; 718 719 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 720 &new_vfp.fpscr, 721 user_fpscr_offset, 722 user_fpscr_offset + sizeof(new_vfp.fpscr)); 723 if (ret) 724 return ret; 725 726 vfp_sync_hwstate(thread); 727 thread->vfpstate.hard = new_vfp; 728 vfp_flush_hwstate(thread); 729 730 return 0; 731 } 732 #endif /* CONFIG_VFP */ 733 734 enum arm_regset { 735 REGSET_GPR, 736 REGSET_FPR, 737 #ifdef CONFIG_VFP 738 REGSET_VFP, 739 #endif 740 }; 741 742 static const struct user_regset arm_regsets[] = { 743 [REGSET_GPR] = { 744 .core_note_type = NT_PRSTATUS, 745 .n = ELF_NGREG, 746 .size = sizeof(u32), 747 .align = sizeof(u32), 748 .get = gpr_get, 749 .set = gpr_set 750 }, 751 [REGSET_FPR] = { 752 /* 753 * For the FPA regs in fpstate, the real fields are a mixture 754 * of sizes, so pretend that the registers are word-sized: 755 */ 756 .core_note_type = NT_PRFPREG, 757 .n = sizeof(struct user_fp) / sizeof(u32), 758 .size = sizeof(u32), 759 .align = sizeof(u32), 760 .get = fpa_get, 761 .set = fpa_set 762 }, 763 #ifdef CONFIG_VFP 764 [REGSET_VFP] = { 765 /* 766 * Pretend that the VFP regs are word-sized, since the FPSCR is 767 * a single word dangling at the end of struct user_vfp: 768 */ 769 .core_note_type = NT_ARM_VFP, 770 .n = ARM_VFPREGS_SIZE / sizeof(u32), 771 .size = sizeof(u32), 772 .align = sizeof(u32), 773 .get = vfp_get, 774 .set = vfp_set 775 }, 776 #endif /* CONFIG_VFP */ 777 }; 778 779 static const struct user_regset_view user_arm_view = { 780 .name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, 781 .regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets) 782 }; 783 784 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 785 { 786 return &user_arm_view; 787 } 788 789 long arch_ptrace(struct task_struct *child, long request, 790 unsigned long addr, unsigned long data) 791 { 792 int ret; 793 unsigned long __user *datap = (unsigned long __user *) data; 794 795 switch (request) { 796 case PTRACE_PEEKUSR: 797 ret = ptrace_read_user(child, addr, datap); 798 break; 799 800 case PTRACE_POKEUSR: 801 ret = ptrace_write_user(child, addr, data); 802 break; 803 804 case PTRACE_GETREGS: 805 ret = copy_regset_to_user(child, 806 &user_arm_view, REGSET_GPR, 807 0, sizeof(struct pt_regs), 808 datap); 809 break; 810 811 case PTRACE_SETREGS: 812 ret = copy_regset_from_user(child, 813 &user_arm_view, REGSET_GPR, 814 0, sizeof(struct pt_regs), 815 datap); 816 break; 817 818 case PTRACE_GETFPREGS: 819 ret = copy_regset_to_user(child, 820 &user_arm_view, REGSET_FPR, 821 0, sizeof(union fp_state), 822 datap); 823 break; 824 825 case PTRACE_SETFPREGS: 826 ret = copy_regset_from_user(child, 827 &user_arm_view, REGSET_FPR, 828 0, sizeof(union fp_state), 829 datap); 830 break; 831 832 #ifdef CONFIG_IWMMXT 833 case PTRACE_GETWMMXREGS: 834 ret = ptrace_getwmmxregs(child, datap); 835 break; 836 837 case PTRACE_SETWMMXREGS: 838 ret = ptrace_setwmmxregs(child, datap); 839 break; 840 #endif 841 842 case PTRACE_GET_THREAD_AREA: 843 ret = put_user(task_thread_info(child)->tp_value, 844 datap); 845 break; 846 847 case PTRACE_SET_SYSCALL: 848 task_thread_info(child)->syscall = data; 849 ret = 0; 850 break; 851 852 #ifdef CONFIG_CRUNCH 853 case PTRACE_GETCRUNCHREGS: 854 ret = ptrace_getcrunchregs(child, datap); 855 break; 856 857 case PTRACE_SETCRUNCHREGS: 858 ret = ptrace_setcrunchregs(child, datap); 859 break; 860 #endif 861 862 #ifdef CONFIG_VFP 863 case PTRACE_GETVFPREGS: 864 ret = copy_regset_to_user(child, 865 &user_arm_view, REGSET_VFP, 866 0, ARM_VFPREGS_SIZE, 867 datap); 868 break; 869 870 case PTRACE_SETVFPREGS: 871 ret = copy_regset_from_user(child, 872 &user_arm_view, REGSET_VFP, 873 0, ARM_VFPREGS_SIZE, 874 datap); 875 break; 876 #endif 877 878 #ifdef CONFIG_HAVE_HW_BREAKPOINT 879 case PTRACE_GETHBPREGS: 880 if (ptrace_get_breakpoints(child) < 0) 881 return -ESRCH; 882 883 ret = ptrace_gethbpregs(child, addr, 884 (unsigned long __user *)data); 885 ptrace_put_breakpoints(child); 886 break; 887 case PTRACE_SETHBPREGS: 888 if (ptrace_get_breakpoints(child) < 0) 889 return -ESRCH; 890 891 ret = ptrace_sethbpregs(child, addr, 892 (unsigned long __user *)data); 893 ptrace_put_breakpoints(child); 894 break; 895 #endif 896 897 default: 898 ret = ptrace_request(child, request, addr, data); 899 break; 900 } 901 902 return ret; 903 } 904 905 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) 906 { 907 unsigned long ip; 908 909 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 910 return scno; 911 if (!(current->ptrace & PT_PTRACED)) 912 return scno; 913 914 /* 915 * Save IP. IP is used to denote syscall entry/exit: 916 * IP = 0 -> entry, = 1 -> exit 917 */ 918 ip = regs->ARM_ip; 919 regs->ARM_ip = why; 920 921 current_thread_info()->syscall = scno; 922 923 /* the 0x80 provides a way for the tracing parent to distinguish 924 between a syscall stop and SIGTRAP delivery */ 925 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 926 ? 0x80 : 0)); 927 /* 928 * this isn't the same as continuing with a signal, but it will do 929 * for normal use. strace only continues with a signal if the 930 * stopping signal is not SIGTRAP. -brl 931 */ 932 if (current->exit_code) { 933 send_sig(current->exit_code, current, 1); 934 current->exit_code = 0; 935 } 936 regs->ARM_ip = ip; 937 938 return current_thread_info()->syscall; 939 } 940