/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


static int ptrace_trapping_sleep_fn(void *flags)
{
	schedule();
	return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid) &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
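
/*
 * Illustrative sketch (hypothetical, not compiled here): the check above is
 * shared with /proc, so a hypothetical in-kernel caller that exposes
 * sensitive per-task state would typically gate it on ptrace_may_access()
 * in the same way.  The function name below is made up purely for
 * illustration.
 *
 *	static int example_show_task_secret(struct task_struct *task)
 *	{
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *			return -EACCES;
 *		return 0;	// caller may now expose the data
 *	}
 */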

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
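
/*
 * Illustrative userspace sketch (hypothetical, not compiled here): the
 * classic PTRACE_TRACEME pattern that lands in ptrace_traceme() above.
 * The child asks to be traced by its parent and then execs; the exec stops
 * it with SIGTRAP so the parent gains control before the new program runs.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("ls", "ls", (char *)NULL);
 *		_exit(127);
 *	}
 *	int status;
 *	waitpid(pid, &status, 0);		// child stopped at exec
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);	// let it run
 */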

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed.  Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}
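
/*
 * Illustrative userspace sketch (hypothetical, not compiled here): attach
 * to an existing process, wait for the trap caused by the SIGSTOP that
 * PTRACE_ATTACH queues, then detach.  The data argument of PTRACE_DETACH
 * is the signal delivered on detach (0 for none), which is what
 * ptrace_detach() validates with valid_signal() above.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// tracee is now ptrace-stopped
 *	// ... inspect or modify the stopped tracee here ...
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)0);
 */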

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);

	return 0;
}
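
/*
 * Illustrative userspace sketch (hypothetical, not compiled here): a
 * minimal syscall-tracing loop built on the resume logic above, assuming
 * pid is an already-attached tracee that is currently stopped.
 * PTRACE_O_TRACESYSGOOD (applied by ptrace_setoptions()) makes syscall
 * stops report SIGTRAP | 0x80 so they can be told apart from real signals.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	int status;
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD);
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 *		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *			break;
 *		if (WIFSTOPPED(status) &&
 *		    WSTOPSIG(status) == (SIGTRAP | 0x80))
 *			;	// syscall entry or exit stop
 *	}
 */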

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif
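
/*
 * Illustrative userspace sketch (hypothetical, not compiled here):
 * PTRACE_GETREGSET as serviced by ptrace_regset() above.  The regset is
 * selected by an ELF note type passed in addr, and the buffer is described
 * by a struct iovec whose iov_len is clamped to the regset size on return.
 * pid is assumed to be a ptrace-stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
 *		;	// iov.iov_len now holds the number of bytes filled
 */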

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
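
/*
 * Illustrative userspace sketch (hypothetical, not compiled here):
 * PTRACE_SEIZE and PTRACE_INTERRUPT as handled above.  SEIZE attaches
 * without sending SIGSTOP, and INTERRUPT traps the tracee into a
 * PTRACE_EVENT_STOP stop on demand; PTRACE_LISTEN (see the case above)
 * would instead let a group-stopped tracee stay stopped while still
 * reporting further events to the tracer.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	ptrace(PTRACE_SEIZE, pid, NULL, NULL);		// tracee keeps running
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);	// ask for a stop
 *	waitpid(pid, &status, 0);			// PTRACE_EVENT_STOP trap
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);		// resume the tracee
 */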

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
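
/*
 * Illustrative userspace sketch (hypothetical, not compiled here):
 * PTRACE_PEEKDATA as serviced by generic_ptrace_peekdata() above.  Reads
 * are one word at a time, and since -1 is a valid word value, errno must
 * be cleared and re-checked to tell data from failure.  pid and addr
 * stand for a stopped tracee and a word-aligned address in its memory.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("PTRACE_PEEKDATA");
 */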

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */