/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}
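
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * ptrace_check_attach() above is why a debugger must wait for the
 * tracee to enter TASK_TRACED before issuing most requests. Assuming
 * the usual <sys/ptrace.h> and <sys/wait.h> interfaces, with "pid" and
 * "addr" hypothetical:
 *
 *	int status;
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *
 * Skip the waitpid() and the child may not yet be traced-and-stopped,
 * so requests other than PTRACE_KILL fail the check here with -ESRCH.
 */
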
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->uid == tcred->euid &&
	     cred->uid == tcred->suid &&
	     cred->uid == tcred->uid &&
	     cred->gid == tcred->egid &&
	     cred->gid == tcred->sgid &&
	     cred->gid == tcred->gid))
		goto ok;
	if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int ptrace_attach(struct task_struct *task)
{
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our
	 * interference; SUID, SGID and LSM creds get determined
	 * differently under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (task_ns_capable(task, CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
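
/*
 * Editor's note: a minimal userspace sketch, not part of this file, of
 * the sequence ptrace_traceme() serves. "path", "argv" and "status" are
 * hypothetical; the headers assumed are <sys/ptrace.h>, <sys/wait.h>
 * and <unistd.h>:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execv(path, argv);
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);
 *
 * Because the child is linked to its real parent here, the exec stops
 * it with SIGTRAP and the parent's waitpid() observes that stop.
 */
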
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
	}
	write_unlock_irq(&tasklist_lock);

	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}
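
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * ptrace_detach() above runs "data" through valid_signal(), so a tracer
 * may hand the stopped tracee a signal to resume with, or 0 for none;
 * "pid" is hypothetical:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)(long)SIGCONT);
 *
 * An out-of-range signal number is rejected with -EIO before any
 * tracing state is touched.
 */
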
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
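
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * ptrace_setoptions() above clears PT_TRACE_MASK and then sets exactly
 * the bits requested, so the tracer passes its whole option set in one
 * call; "pid" is hypothetical:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
 *
 * With PTRACE_O_TRACESYSGOOD, syscall stops report WSTOPSIG(status) as
 * SIGTRAP | 0x80, distinguishable from a genuine SIGTRAP. Any bit
 * outside PTRACE_O_MASK makes the request return -EINVAL.
 */
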
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif
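
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * PTRACE_GETREGSET, served by ptrace_regset() above, takes an ELF note
 * type in "addr" and a struct iovec in "data", and trims iov_len to
 * what was actually copied. Assuming an x86-64 tracee, <sys/uio.h>,
 * <sys/user.h> and NT_PRSTATUS from <elf.h>, with "pid" hypothetical:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * A length that is not a multiple of the regset's slot size fails the
 * check above with -EINVAL before anything is copied.
 */
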
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	siginfo_t siginfo;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}
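
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * generic_ptrace_peekdata() stores the fetched word through the "data"
 * pointer, but the glibc ptrace() wrapper returns the word instead, so
 * -1 is a legal result and errno is the only reliable error signal;
 * "pid" and "addr" are hypothetical:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 *
 * access_process_vm() copies arbitrary byte ranges, so "addr" need not
 * be word-aligned on this path.
 */
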
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
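
/*
 * Editor's note: a minimal userspace sketch, not part of this file.
 * Both generic_ptrace_pokedata() and the compat POKE cases above write
 * one word at a time, so tracers patch code read-modify-write. The
 * classic little-endian x86 breakpoint insertion, with "pid" and "addr"
 * hypothetical:
 *
 *	errno = 0;
 *	long orig = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, NULL);
 *	long patched = (orig & ~0xffL) | 0xcc;
 *	ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)patched);
 *
 * 0xcc is the int3 opcode; writing back "orig" removes the breakpoint.
 */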