1 /* 2 * linux/kernel/ptrace.c 3 * 4 * (C) Copyright 1999 Linus Torvalds 5 * 6 * Common interfaces for "ptrace()" which we do not want 7 * to continually duplicate across every architecture. 8 */ 9 10 #include <linux/capability.h> 11 #include <linux/module.h> 12 #include <linux/sched.h> 13 #include <linux/errno.h> 14 #include <linux/mm.h> 15 #include <linux/highmem.h> 16 #include <linux/pagemap.h> 17 #include <linux/ptrace.h> 18 #include <linux/security.h> 19 #include <linux/signal.h> 20 #include <linux/audit.h> 21 #include <linux/pid_namespace.h> 22 #include <linux/syscalls.h> 23 #include <linux/uaccess.h> 24 #include <linux/regset.h> 25 #include <linux/hw_breakpoint.h> 26 27 28 /* 29 * ptrace a task: make the debugger its new parent and 30 * move it to the ptrace list. 31 * 32 * Must be called with the tasklist lock write-held. 33 */ 34 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) 35 { 36 BUG_ON(!list_empty(&child->ptrace_entry)); 37 list_add(&child->ptrace_entry, &new_parent->ptraced); 38 child->parent = new_parent; 39 } 40 41 /** 42 * __ptrace_unlink - unlink ptracee and restore its execution state 43 * @child: ptracee to be unlinked 44 * 45 * Remove @child from the ptrace list, move it back to the original parent, 46 * and restore the execution state so that it conforms to the group stop 47 * state. 48 * 49 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer 50 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between 51 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. 52 * If the ptracer is exiting, the ptracee can be in any state. 53 * 54 * After detach, the ptracee should be in a state which conforms to the 55 * group stop. If the group is stopped or in the process of stopping, the 56 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken 57 * up from TASK_TRACED. 58 * 59 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, 60 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar 61 * to but in the opposite direction of what happens while attaching to a 62 * stopped task. However, in this direction, the intermediate RUNNING 63 * state is not hidden even from the current ptracer and if it immediately 64 * re-attaches and performs a WNOHANG wait(2), it may fail. 65 * 66 * CONTEXT: 67 * write_lock_irq(tasklist_lock) 68 */ 69 void __ptrace_unlink(struct task_struct *child) 70 { 71 BUG_ON(!child->ptrace); 72 73 child->ptrace = 0; 74 child->parent = child->real_parent; 75 list_del_init(&child->ptrace_entry); 76 77 spin_lock(&child->sighand->siglock); 78 79 /* 80 * Reinstate GROUP_STOP_PENDING if group stop is in effect and 81 * @child isn't dead. 82 */ 83 if (!(child->flags & PF_EXITING) && 84 (child->signal->flags & SIGNAL_STOP_STOPPED || 85 child->signal->group_stop_count)) 86 child->group_stop |= GROUP_STOP_PENDING; 87 88 /* 89 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick 90 * @child in the butt. Note that @resume should be used iff @child 91 * is in TASK_TRACED; otherwise, we might unduly disrupt 92 * TASK_KILLABLE sleeps. 93 */ 94 if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) 95 signal_wake_up(child, task_is_traced(child)); 96 97 spin_unlock(&child->sighand->siglock); 98 } 99 100 /* 101 * Check that we have indeed attached to the thing.. 102 */ 103 int ptrace_check_attach(struct task_struct *child, int kill) 104 { 105 int ret = -ESRCH; 106 107 /* 108 * We take the read lock around doing both checks to close a 109 * possible race where someone else was tracing our child and 110 * detached between these two checks. After this locked check, 111 * we are sure that this is our traced child and that can only 112 * be changed by us so it's not changing right after this. 113 */ 114 read_lock(&tasklist_lock); 115 if ((child->ptrace & PT_PTRACED) && child->parent == current) { 116 /* 117 * child->sighand can't be NULL, release_task() 118 * does ptrace_unlink() before __exit_signal(). 119 */ 120 spin_lock_irq(&child->sighand->siglock); 121 WARN_ON_ONCE(task_is_stopped(child)); 122 if (task_is_traced(child) || kill) 123 ret = 0; 124 spin_unlock_irq(&child->sighand->siglock); 125 } 126 read_unlock(&tasklist_lock); 127 128 if (!ret && !kill) 129 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; 130 131 /* All systems go.. */ 132 return ret; 133 } 134 135 int __ptrace_may_access(struct task_struct *task, unsigned int mode) 136 { 137 const struct cred *cred = current_cred(), *tcred; 138 139 /* May we inspect the given task? 140 * This check is used both for attaching with ptrace 141 * and for allowing access to sensitive information in /proc. 142 * 143 * ptrace_attach denies several cases that /proc allows 144 * because setting up the necessary parent/child relationship 145 * or halting the specified task is impossible. 146 */ 147 int dumpable = 0; 148 /* Don't let security modules deny introspection */ 149 if (task == current) 150 return 0; 151 rcu_read_lock(); 152 tcred = __task_cred(task); 153 if (cred->user->user_ns == tcred->user->user_ns && 154 (cred->uid == tcred->euid && 155 cred->uid == tcred->suid && 156 cred->uid == tcred->uid && 157 cred->gid == tcred->egid && 158 cred->gid == tcred->sgid && 159 cred->gid == tcred->gid)) 160 goto ok; 161 if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)) 162 goto ok; 163 rcu_read_unlock(); 164 return -EPERM; 165 ok: 166 rcu_read_unlock(); 167 smp_rmb(); 168 if (task->mm) 169 dumpable = get_dumpable(task->mm); 170 if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE)) 171 return -EPERM; 172 173 return security_ptrace_access_check(task, mode); 174 } 175 176 bool ptrace_may_access(struct task_struct *task, unsigned int mode) 177 { 178 int err; 179 task_lock(task); 180 err = __ptrace_may_access(task, mode); 181 task_unlock(task); 182 return !err; 183 } 184 185 static int ptrace_attach(struct task_struct *task) 186 { 187 bool wait_trap = false; 188 int retval; 189 190 audit_ptrace(task); 191 192 retval = -EPERM; 193 if (unlikely(task->flags & PF_KTHREAD)) 194 goto out; 195 if (same_thread_group(task, current)) 196 goto out; 197 198 /* 199 * Protect exec's credential calculations against our interference; 200 * interference; SUID, SGID and LSM creds get determined differently 201 * under ptrace. 202 */ 203 retval = -ERESTARTNOINTR; 204 if (mutex_lock_interruptible(&task->signal->cred_guard_mutex)) 205 goto out; 206 207 task_lock(task); 208 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); 209 task_unlock(task); 210 if (retval) 211 goto unlock_creds; 212 213 write_lock_irq(&tasklist_lock); 214 retval = -EPERM; 215 if (unlikely(task->exit_state)) 216 goto unlock_tasklist; 217 if (task->ptrace) 218 goto unlock_tasklist; 219 220 task->ptrace = PT_PTRACED; 221 if (task_ns_capable(task, CAP_SYS_PTRACE)) 222 task->ptrace |= PT_PTRACE_CAP; 223 224 __ptrace_link(task, current); 225 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); 226 227 spin_lock(&task->sighand->siglock); 228 229 /* 230 * If the task is already STOPPED, set GROUP_STOP_PENDING and 231 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING 232 * will be cleared if the child completes the transition or any 233 * event which clears the group stop states happens. We'll wait 234 * for the transition to complete before returning from this 235 * function. 236 * 237 * This hides STOPPED -> RUNNING -> TRACED transition from the 238 * attaching thread but a different thread in the same group can 239 * still observe the transient RUNNING state. IOW, if another 240 * thread's WNOHANG wait(2) on the stopped tracee races against 241 * ATTACH, the wait(2) may fail due to the transient RUNNING. 242 * 243 * The following task_is_stopped() test is safe as both transitions 244 * in and out of STOPPED are protected by siglock. 245 */ 246 if (task_is_stopped(task)) { 247 task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; 248 signal_wake_up(task, 1); 249 wait_trap = true; 250 } 251 252 spin_unlock(&task->sighand->siglock); 253 254 retval = 0; 255 unlock_tasklist: 256 write_unlock_irq(&tasklist_lock); 257 unlock_creds: 258 mutex_unlock(&task->signal->cred_guard_mutex); 259 out: 260 if (wait_trap) 261 wait_event(current->signal->wait_chldexit, 262 !(task->group_stop & GROUP_STOP_TRAPPING)); 263 return retval; 264 } 265 266 /** 267 * ptrace_traceme -- helper for PTRACE_TRACEME 268 * 269 * Performs checks and sets PT_PTRACED. 270 * Should be used by all ptrace implementations for PTRACE_TRACEME. 271 */ 272 static int ptrace_traceme(void) 273 { 274 int ret = -EPERM; 275 276 write_lock_irq(&tasklist_lock); 277 /* Are we already being traced? */ 278 if (!current->ptrace) { 279 ret = security_ptrace_traceme(current->parent); 280 /* 281 * Check PF_EXITING to ensure ->real_parent has not passed 282 * exit_ptrace(). Otherwise we don't report the error but 283 * pretend ->real_parent untraces us right after return. 284 */ 285 if (!ret && !(current->real_parent->flags & PF_EXITING)) { 286 current->ptrace = PT_PTRACED; 287 __ptrace_link(current, current->real_parent); 288 } 289 } 290 write_unlock_irq(&tasklist_lock); 291 292 return ret; 293 } 294 295 /* 296 * Called with irqs disabled, returns true if childs should reap themselves. 297 */ 298 static int ignoring_children(struct sighand_struct *sigh) 299 { 300 int ret; 301 spin_lock(&sigh->siglock); 302 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || 303 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); 304 spin_unlock(&sigh->siglock); 305 return ret; 306 } 307 308 /* 309 * Called with tasklist_lock held for writing. 310 * Unlink a traced task, and clean it up if it was a traced zombie. 311 * Return true if it needs to be reaped with release_task(). 312 * (We can't call release_task() here because we already hold tasklist_lock.) 313 * 314 * If it's a zombie, our attachedness prevented normal parent notification 315 * or self-reaping. Do notification now if it would have happened earlier. 316 * If it should reap itself, return true. 317 * 318 * If it's our own child, there is no notification to do. But if our normal 319 * children self-reap, then this child was prevented by ptrace and we must 320 * reap it now, in that case we must also wake up sub-threads sleeping in 321 * do_wait(). 322 */ 323 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) 324 { 325 __ptrace_unlink(p); 326 327 if (p->exit_state == EXIT_ZOMBIE) { 328 if (!task_detached(p) && thread_group_empty(p)) { 329 if (!same_thread_group(p->real_parent, tracer)) 330 do_notify_parent(p, p->exit_signal); 331 else if (ignoring_children(tracer->sighand)) { 332 __wake_up_parent(p, tracer); 333 p->exit_signal = -1; 334 } 335 } 336 if (task_detached(p)) { 337 /* Mark it as in the process of being reaped. */ 338 p->exit_state = EXIT_DEAD; 339 return true; 340 } 341 } 342 343 return false; 344 } 345 346 static int ptrace_detach(struct task_struct *child, unsigned int data) 347 { 348 bool dead = false; 349 350 if (!valid_signal(data)) 351 return -EIO; 352 353 /* Architecture-specific hardware disable .. */ 354 ptrace_disable(child); 355 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 356 357 write_lock_irq(&tasklist_lock); 358 /* 359 * This child can be already killed. Make sure de_thread() or 360 * our sub-thread doing do_wait() didn't do release_task() yet. 361 */ 362 if (child->ptrace) { 363 child->exit_code = data; 364 dead = __ptrace_detach(current, child); 365 } 366 write_unlock_irq(&tasklist_lock); 367 368 if (unlikely(dead)) 369 release_task(child); 370 371 return 0; 372 } 373 374 /* 375 * Detach all tasks we were using ptrace on. Called with tasklist held 376 * for writing, and returns with it held too. But note it can release 377 * and reacquire the lock. 378 */ 379 void exit_ptrace(struct task_struct *tracer) 380 __releases(&tasklist_lock) 381 __acquires(&tasklist_lock) 382 { 383 struct task_struct *p, *n; 384 LIST_HEAD(ptrace_dead); 385 386 if (likely(list_empty(&tracer->ptraced))) 387 return; 388 389 list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { 390 if (__ptrace_detach(tracer, p)) 391 list_add(&p->ptrace_entry, &ptrace_dead); 392 } 393 394 write_unlock_irq(&tasklist_lock); 395 BUG_ON(!list_empty(&tracer->ptraced)); 396 397 list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { 398 list_del_init(&p->ptrace_entry); 399 release_task(p); 400 } 401 402 write_lock_irq(&tasklist_lock); 403 } 404 405 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) 406 { 407 int copied = 0; 408 409 while (len > 0) { 410 char buf[128]; 411 int this_len, retval; 412 413 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 414 retval = access_process_vm(tsk, src, buf, this_len, 0); 415 if (!retval) { 416 if (copied) 417 break; 418 return -EIO; 419 } 420 if (copy_to_user(dst, buf, retval)) 421 return -EFAULT; 422 copied += retval; 423 src += retval; 424 dst += retval; 425 len -= retval; 426 } 427 return copied; 428 } 429 430 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) 431 { 432 int copied = 0; 433 434 while (len > 0) { 435 char buf[128]; 436 int this_len, retval; 437 438 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 439 if (copy_from_user(buf, src, this_len)) 440 return -EFAULT; 441 retval = access_process_vm(tsk, dst, buf, this_len, 1); 442 if (!retval) { 443 if (copied) 444 break; 445 return -EIO; 446 } 447 copied += retval; 448 src += retval; 449 dst += retval; 450 len -= retval; 451 } 452 return copied; 453 } 454 455 static int ptrace_setoptions(struct task_struct *child, unsigned long data) 456 { 457 child->ptrace &= ~PT_TRACE_MASK; 458 459 if (data & PTRACE_O_TRACESYSGOOD) 460 child->ptrace |= PT_TRACESYSGOOD; 461 462 if (data & PTRACE_O_TRACEFORK) 463 child->ptrace |= PT_TRACE_FORK; 464 465 if (data & PTRACE_O_TRACEVFORK) 466 child->ptrace |= PT_TRACE_VFORK; 467 468 if (data & PTRACE_O_TRACECLONE) 469 child->ptrace |= PT_TRACE_CLONE; 470 471 if (data & PTRACE_O_TRACEEXEC) 472 child->ptrace |= PT_TRACE_EXEC; 473 474 if (data & PTRACE_O_TRACEVFORKDONE) 475 child->ptrace |= PT_TRACE_VFORK_DONE; 476 477 if (data & PTRACE_O_TRACEEXIT) 478 child->ptrace |= PT_TRACE_EXIT; 479 480 return (data & ~PTRACE_O_MASK) ? -EINVAL : 0; 481 } 482 483 static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) 484 { 485 unsigned long flags; 486 int error = -ESRCH; 487 488 if (lock_task_sighand(child, &flags)) { 489 error = -EINVAL; 490 if (likely(child->last_siginfo != NULL)) { 491 *info = *child->last_siginfo; 492 error = 0; 493 } 494 unlock_task_sighand(child, &flags); 495 } 496 return error; 497 } 498 499 static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) 500 { 501 unsigned long flags; 502 int error = -ESRCH; 503 504 if (lock_task_sighand(child, &flags)) { 505 error = -EINVAL; 506 if (likely(child->last_siginfo != NULL)) { 507 *child->last_siginfo = *info; 508 error = 0; 509 } 510 unlock_task_sighand(child, &flags); 511 } 512 return error; 513 } 514 515 516 #ifdef PTRACE_SINGLESTEP 517 #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) 518 #else 519 #define is_singlestep(request) 0 520 #endif 521 522 #ifdef PTRACE_SINGLEBLOCK 523 #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) 524 #else 525 #define is_singleblock(request) 0 526 #endif 527 528 #ifdef PTRACE_SYSEMU 529 #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) 530 #else 531 #define is_sysemu_singlestep(request) 0 532 #endif 533 534 static int ptrace_resume(struct task_struct *child, long request, 535 unsigned long data) 536 { 537 if (!valid_signal(data)) 538 return -EIO; 539 540 if (request == PTRACE_SYSCALL) 541 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 542 else 543 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 544 545 #ifdef TIF_SYSCALL_EMU 546 if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) 547 set_tsk_thread_flag(child, TIF_SYSCALL_EMU); 548 else 549 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); 550 #endif 551 552 if (is_singleblock(request)) { 553 if (unlikely(!arch_has_block_step())) 554 return -EIO; 555 user_enable_block_step(child); 556 } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { 557 if (unlikely(!arch_has_single_step())) 558 return -EIO; 559 user_enable_single_step(child); 560 } else { 561 user_disable_single_step(child); 562 } 563 564 child->exit_code = data; 565 wake_up_state(child, __TASK_TRACED); 566 567 return 0; 568 } 569 570 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 571 572 static const struct user_regset * 573 find_regset(const struct user_regset_view *view, unsigned int type) 574 { 575 const struct user_regset *regset; 576 int n; 577 578 for (n = 0; n < view->n; ++n) { 579 regset = view->regsets + n; 580 if (regset->core_note_type == type) 581 return regset; 582 } 583 584 return NULL; 585 } 586 587 static int ptrace_regset(struct task_struct *task, int req, unsigned int type, 588 struct iovec *kiov) 589 { 590 const struct user_regset_view *view = task_user_regset_view(task); 591 const struct user_regset *regset = find_regset(view, type); 592 int regset_no; 593 594 if (!regset || (kiov->iov_len % regset->size) != 0) 595 return -EINVAL; 596 597 regset_no = regset - view->regsets; 598 kiov->iov_len = min(kiov->iov_len, 599 (__kernel_size_t) (regset->n * regset->size)); 600 601 if (req == PTRACE_GETREGSET) 602 return copy_regset_to_user(task, view, regset_no, 0, 603 kiov->iov_len, kiov->iov_base); 604 else 605 return copy_regset_from_user(task, view, regset_no, 0, 606 kiov->iov_len, kiov->iov_base); 607 } 608 609 #endif 610 611 int ptrace_request(struct task_struct *child, long request, 612 unsigned long addr, unsigned long data) 613 { 614 int ret = -EIO; 615 siginfo_t siginfo; 616 void __user *datavp = (void __user *) data; 617 unsigned long __user *datalp = datavp; 618 619 switch (request) { 620 case PTRACE_PEEKTEXT: 621 case PTRACE_PEEKDATA: 622 return generic_ptrace_peekdata(child, addr, data); 623 case PTRACE_POKETEXT: 624 case PTRACE_POKEDATA: 625 return generic_ptrace_pokedata(child, addr, data); 626 627 #ifdef PTRACE_OLDSETOPTIONS 628 case PTRACE_OLDSETOPTIONS: 629 #endif 630 case PTRACE_SETOPTIONS: 631 ret = ptrace_setoptions(child, data); 632 break; 633 case PTRACE_GETEVENTMSG: 634 ret = put_user(child->ptrace_message, datalp); 635 break; 636 637 case PTRACE_GETSIGINFO: 638 ret = ptrace_getsiginfo(child, &siginfo); 639 if (!ret) 640 ret = copy_siginfo_to_user(datavp, &siginfo); 641 break; 642 643 case PTRACE_SETSIGINFO: 644 if (copy_from_user(&siginfo, datavp, sizeof siginfo)) 645 ret = -EFAULT; 646 else 647 ret = ptrace_setsiginfo(child, &siginfo); 648 break; 649 650 case PTRACE_DETACH: /* detach a process that was attached. */ 651 ret = ptrace_detach(child, data); 652 break; 653 654 #ifdef CONFIG_BINFMT_ELF_FDPIC 655 case PTRACE_GETFDPIC: { 656 struct mm_struct *mm = get_task_mm(child); 657 unsigned long tmp = 0; 658 659 ret = -ESRCH; 660 if (!mm) 661 break; 662 663 switch (addr) { 664 case PTRACE_GETFDPIC_EXEC: 665 tmp = mm->context.exec_fdpic_loadmap; 666 break; 667 case PTRACE_GETFDPIC_INTERP: 668 tmp = mm->context.interp_fdpic_loadmap; 669 break; 670 default: 671 break; 672 } 673 mmput(mm); 674 675 ret = put_user(tmp, datalp); 676 break; 677 } 678 #endif 679 680 #ifdef PTRACE_SINGLESTEP 681 case PTRACE_SINGLESTEP: 682 #endif 683 #ifdef PTRACE_SINGLEBLOCK 684 case PTRACE_SINGLEBLOCK: 685 #endif 686 #ifdef PTRACE_SYSEMU 687 case PTRACE_SYSEMU: 688 case PTRACE_SYSEMU_SINGLESTEP: 689 #endif 690 case PTRACE_SYSCALL: 691 case PTRACE_CONT: 692 return ptrace_resume(child, request, data); 693 694 case PTRACE_KILL: 695 if (child->exit_state) /* already dead */ 696 return 0; 697 return ptrace_resume(child, request, SIGKILL); 698 699 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 700 case PTRACE_GETREGSET: 701 case PTRACE_SETREGSET: 702 { 703 struct iovec kiov; 704 struct iovec __user *uiov = datavp; 705 706 if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 707 return -EFAULT; 708 709 if (__get_user(kiov.iov_base, &uiov->iov_base) || 710 __get_user(kiov.iov_len, &uiov->iov_len)) 711 return -EFAULT; 712 713 ret = ptrace_regset(child, request, addr, &kiov); 714 if (!ret) 715 ret = __put_user(kiov.iov_len, &uiov->iov_len); 716 break; 717 } 718 #endif 719 default: 720 break; 721 } 722 723 return ret; 724 } 725 726 static struct task_struct *ptrace_get_task_struct(pid_t pid) 727 { 728 struct task_struct *child; 729 730 rcu_read_lock(); 731 child = find_task_by_vpid(pid); 732 if (child) 733 get_task_struct(child); 734 rcu_read_unlock(); 735 736 if (!child) 737 return ERR_PTR(-ESRCH); 738 return child; 739 } 740 741 #ifndef arch_ptrace_attach 742 #define arch_ptrace_attach(child) do { } while (0) 743 #endif 744 745 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, 746 unsigned long, data) 747 { 748 struct task_struct *child; 749 long ret; 750 751 if (request == PTRACE_TRACEME) { 752 ret = ptrace_traceme(); 753 if (!ret) 754 arch_ptrace_attach(current); 755 goto out; 756 } 757 758 child = ptrace_get_task_struct(pid); 759 if (IS_ERR(child)) { 760 ret = PTR_ERR(child); 761 goto out; 762 } 763 764 if (request == PTRACE_ATTACH) { 765 ret = ptrace_attach(child); 766 /* 767 * Some architectures need to do book-keeping after 768 * a ptrace attach. 769 */ 770 if (!ret) 771 arch_ptrace_attach(child); 772 goto out_put_task_struct; 773 } 774 775 ret = ptrace_check_attach(child, request == PTRACE_KILL); 776 if (ret < 0) 777 goto out_put_task_struct; 778 779 ret = arch_ptrace(child, request, addr, data); 780 781 out_put_task_struct: 782 put_task_struct(child); 783 out: 784 return ret; 785 } 786 787 int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, 788 unsigned long data) 789 { 790 unsigned long tmp; 791 int copied; 792 793 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); 794 if (copied != sizeof(tmp)) 795 return -EIO; 796 return put_user(tmp, (unsigned long __user *)data); 797 } 798 799 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, 800 unsigned long data) 801 { 802 int copied; 803 804 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1); 805 return (copied == sizeof(data)) ? 0 : -EIO; 806 } 807 808 #if defined CONFIG_COMPAT 809 #include <linux/compat.h> 810 811 int compat_ptrace_request(struct task_struct *child, compat_long_t request, 812 compat_ulong_t addr, compat_ulong_t data) 813 { 814 compat_ulong_t __user *datap = compat_ptr(data); 815 compat_ulong_t word; 816 siginfo_t siginfo; 817 int ret; 818 819 switch (request) { 820 case PTRACE_PEEKTEXT: 821 case PTRACE_PEEKDATA: 822 ret = access_process_vm(child, addr, &word, sizeof(word), 0); 823 if (ret != sizeof(word)) 824 ret = -EIO; 825 else 826 ret = put_user(word, datap); 827 break; 828 829 case PTRACE_POKETEXT: 830 case PTRACE_POKEDATA: 831 ret = access_process_vm(child, addr, &data, sizeof(data), 1); 832 ret = (ret != sizeof(data) ? -EIO : 0); 833 break; 834 835 case PTRACE_GETEVENTMSG: 836 ret = put_user((compat_ulong_t) child->ptrace_message, datap); 837 break; 838 839 case PTRACE_GETSIGINFO: 840 ret = ptrace_getsiginfo(child, &siginfo); 841 if (!ret) 842 ret = copy_siginfo_to_user32( 843 (struct compat_siginfo __user *) datap, 844 &siginfo); 845 break; 846 847 case PTRACE_SETSIGINFO: 848 memset(&siginfo, 0, sizeof siginfo); 849 if (copy_siginfo_from_user32( 850 &siginfo, (struct compat_siginfo __user *) datap)) 851 ret = -EFAULT; 852 else 853 ret = ptrace_setsiginfo(child, &siginfo); 854 break; 855 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 856 case PTRACE_GETREGSET: 857 case PTRACE_SETREGSET: 858 { 859 struct iovec kiov; 860 struct compat_iovec __user *uiov = 861 (struct compat_iovec __user *) datap; 862 compat_uptr_t ptr; 863 compat_size_t len; 864 865 if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov))) 866 return -EFAULT; 867 868 if (__get_user(ptr, &uiov->iov_base) || 869 __get_user(len, &uiov->iov_len)) 870 return -EFAULT; 871 872 kiov.iov_base = compat_ptr(ptr); 873 kiov.iov_len = len; 874 875 ret = ptrace_regset(child, request, addr, &kiov); 876 if (!ret) 877 ret = __put_user(kiov.iov_len, &uiov->iov_len); 878 break; 879 } 880 #endif 881 882 default: 883 ret = ptrace_request(child, request, addr, data); 884 } 885 886 return ret; 887 } 888 889 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, 890 compat_long_t addr, compat_long_t data) 891 { 892 struct task_struct *child; 893 long ret; 894 895 if (request == PTRACE_TRACEME) { 896 ret = ptrace_traceme(); 897 goto out; 898 } 899 900 child = ptrace_get_task_struct(pid); 901 if (IS_ERR(child)) { 902 ret = PTR_ERR(child); 903 goto out; 904 } 905 906 if (request == PTRACE_ATTACH) { 907 ret = ptrace_attach(child); 908 /* 909 * Some architectures need to do book-keeping after 910 * a ptrace attach. 911 */ 912 if (!ret) 913 arch_ptrace_attach(child); 914 goto out_put_task_struct; 915 } 916 917 ret = ptrace_check_attach(child, request == PTRACE_KILL); 918 if (!ret) 919 ret = compat_arch_ptrace(child, request, addr, data); 920 921 out_put_task_struct: 922 put_task_struct(child); 923 out: 924 return ret; 925 } 926 #endif /* CONFIG_COMPAT */ 927 928 #ifdef CONFIG_HAVE_HW_BREAKPOINT 929 int ptrace_get_breakpoints(struct task_struct *tsk) 930 { 931 if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) 932 return 0; 933 934 return -1; 935 } 936 937 void ptrace_put_breakpoints(struct task_struct *tsk) 938 { 939 if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) 940 flush_ptrace_hw_breakpoint(tsk); 941 } 942 #endif /* CONFIG_HAVE_HW_BREAKPOINT */ 943