/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>


/* Action function for the wait_on_bit() in ptrace_attach(): just reschedule. */
static int ptrace_trapping_sleep_fn(void *flags)
{
	schedule();
	return 0;
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group; set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		signal_wake_up(child, task_is_traced(child));

	spin_unlock(&child->sighand->siglock);
}
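
/*
 * Illustration only (not kernel code): a minimal tracer-side sketch of the
 * race described in the __ptrace_unlink() comment above.  When the group is
 * stopped, detaching moves the tracee TRACED -> RUNNING -> STOPPED, and the
 * transient RUNNING state is visible to the ptracer, so an immediate
 * re-attach followed by a WNOHANG wait(2) may see nothing to report.  The
 * userspace sequence below is a hedged sketch, with error handling omitted:
 *
 *	ptrace(PTRACE_DETACH, pid, 0, 0);
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	// May return 0 (nothing to report yet) while the tracee is still
 *	// making the RUNNING -> STOPPED transition:
 *	pid_t p = waitpid(pid, &status, WNOHANG);
 */
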
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		WARN_ON_ONCE(task_is_stopped(child));
		if (ignore_state || (task_is_traced(child) &&
				     !(child->jobctl & JOBCTL_LISTENING)))
			ret = 0;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}
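
/*
 * Illustration only: ptrace_check_attach() gates most requests, so a tracer
 * must let the tracee reach TASK_TRACED before issuing anything other than
 * KILL/INTERRUPT.  A hedged userspace sketch of the usual sequence (error
 * handling omitted):
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);	// wait for the SIGSTOP trap
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *
 * Issuing PTRACE_PEEKDATA before the waitpid() can fail with ESRCH because
 * the tracee is not yet in TASK_TRACED.
 */
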
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->uid == tcred->euid &&
	     cred->uid == tcred->suid &&
	     cred->uid == tcred->uid &&
	     cred->gid == tcred->egid &&
	     cred->gid == tcred->sgid &&
	     cred->gid == tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !ptrace_has_cap(task_user_ns(task), mode))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

/* Returns true if %current may access @task in the given @mode. */
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}
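
/*
 * Illustration only: besides the uid/gid match above, the target must be
 * dumpable (or the tracer must hold CAP_SYS_PTRACE in the target's user
 * namespace).  A hedged userspace sketch of how a process can opt out of
 * being traced by same-uid peers:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_DUMPABLE, 0);	// get_dumpable(mm) now returns 0, so
 *					// __ptrace_may_access() fails with
 *					// -EPERM for unprivileged tracers
 */
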
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	/*
	 * SEIZE will enable new ptrace behaviors which will be implemented
	 * gradually.  SEIZE_DEVEL is used to prevent applications
	 * expecting full SEIZE behaviors trapping on kernel commits which
	 * are still in the process of implementing them.
	 *
	 * Only test programs for new ptrace behaviors being implemented
	 * should set SEIZE_DEVEL.  If unset, SEIZE will fail with -EIO.
	 *
	 * Once SEIZE behaviors are completely implemented, this flag and
	 * the following test will be removed.
	 */
	retval = -EIO;
	if (seize && !(flags & PTRACE_SEIZE_DEVEL))
		goto out;

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under
	 * ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (seize)
		task->ptrace |= PT_SEIZED;
	if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up(task, 1);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
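
/*
 * Illustration only: per the SEIZE_DEVEL gate in ptrace_attach() above, a
 * test program attaching with the in-development SEIZE semantics must pass
 * PTRACE_SEIZE_DEVEL in the data argument or get -EIO.  A hedged userspace
 * sketch, assuming the PTRACE_SEIZE/PTRACE_SEIZE_DEVEL constants are
 * visible to userspace:
 *
 *	if (ptrace(PTRACE_SEIZE, pid, 0, PTRACE_SEIZE_DEVEL) < 0)
 *		perror("PTRACE_SEIZE");
 *	// Unlike PTRACE_ATTACH, no SIGSTOP is sent, so the tracee keeps
 *	// running; use PTRACE_INTERRUPT to trap it when needed.
 */
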
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed.  Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on.  Called with tasklist held
 * for writing, and returns with it held too.  But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

/* Copy @len bytes from @tsk's memory at @src into the tracer's buffer @dst. */
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

/* Copy @len bytes from the tracer's buffer @src into @tsk's memory at @dst. */
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
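
/*
 * Illustration only: ptrace_setoptions() validates the whole mask, so any
 * unknown bit makes the entire request fail with -EINVAL.  A hedged
 * userspace sketch of enabling fork and exec tracing on a stopped tracee:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC);
 */
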
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif
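
/*
 * Illustration only: PTRACE_GETREGSET/SETREGSET take a struct iovec whose
 * length must be a multiple of the regset's element size; ptrace_regset()
 * truncates iov_len to what it actually copied.  A hedged userspace sketch
 * for the general-purpose registers (NT_PRSTATUS):
 *
 *	#include <elf.h>
 *	#include <sys/uio.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		;	// iov.iov_len now holds the bytes actually filled
 */
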
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
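
/*
 * Illustration only: the INTERRUPT/LISTEN protocol handled above is only
 * valid on a SEIZEd tracee.  A hedged userspace sketch of watching for
 * asynchronous events (e.g. group stop changes) without keeping the tracee
 * trapped:
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_SEIZE_DEVEL);
 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);	// guarantee a trap
 *	waitpid(pid, &status, 0);		// tracee now in STOP
 *	ptrace(PTRACE_LISTEN, pid, 0, 0);	// wait for async events
 *	waitpid(pid, &status, 0);		// re-traps on e.g. group stop
 */
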
/* Find the task for @pid in the caller's pid namespace and pin it. */
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
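
/*
 * Illustration only: PEEKDATA/POKEDATA transfer exactly one word per call,
 * as generic_ptrace_peekdata() above shows, so copying a buffer means
 * looping word by word.  A hedged userspace sketch that reads sizeof(long)
 * bytes at a time:
 *
 *	for (size_t i = 0; i < len; i += sizeof(long)) {
 *		errno = 0;
 *		long word = ptrace(PTRACE_PEEKDATA, pid, addr + i, 0);
 *		if (word == -1 && errno)
 *			break;		// -EIO/-ESRCH from the kernel
 *		memcpy(buf + i, &word, sizeof(word));
 *	}
 */
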
#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}

void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
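
/*
 * Illustration only: arch ptrace code is expected to bracket hardware-
 * breakpoint access with the refcount helpers above so that the final put
 * flushes the breakpoints.  A hedged kernel-side sketch (the debug-register
 * accessor name is hypothetical; only the get/put helpers come from this
 * file):
 *
 *	if (ptrace_get_breakpoints(child) < 0)
 *		return -ESRCH;
 *	ret = arch_poke_debug_reg(child, n, val);	// hypothetical helper
 *	ptrace_put_breakpoints(child);
 */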