1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/ptrace.c 4 * 5 * (C) Copyright 1999 Linus Torvalds 6 * 7 * Common interfaces for "ptrace()" which we do not want 8 * to continually duplicate across every architecture. 9 */ 10 11 #include <linux/capability.h> 12 #include <linux/export.h> 13 #include <linux/sched.h> 14 #include <linux/sched/mm.h> 15 #include <linux/sched/coredump.h> 16 #include <linux/sched/task.h> 17 #include <linux/errno.h> 18 #include <linux/mm.h> 19 #include <linux/highmem.h> 20 #include <linux/pagemap.h> 21 #include <linux/ptrace.h> 22 #include <linux/security.h> 23 #include <linux/signal.h> 24 #include <linux/uio.h> 25 #include <linux/audit.h> 26 #include <linux/pid_namespace.h> 27 #include <linux/syscalls.h> 28 #include <linux/uaccess.h> 29 #include <linux/regset.h> 30 #include <linux/hw_breakpoint.h> 31 #include <linux/cn_proc.h> 32 #include <linux/compat.h> 33 #include <linux/sched/signal.h> 34 #include <linux/minmax.h> 35 #include <linux/syscall_user_dispatch.h> 36 37 #include <asm/syscall.h> /* for syscall_get_* */ 38 39 /* 40 * Access another process' address space via ptrace. 41 * Source/target buffer must be kernel space, 42 * Do not walk the page table directly, use get_user_pages 43 */ 44 int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, 45 void *buf, int len, unsigned int gup_flags) 46 { 47 struct mm_struct *mm; 48 int ret; 49 50 mm = get_task_mm(tsk); 51 if (!mm) 52 return 0; 53 54 if (!tsk->ptrace || 55 (current != tsk->parent) || 56 ((get_dumpable(mm) != SUID_DUMP_USER) && 57 !ptracer_capable(tsk, mm->user_ns))) { 58 mmput(mm); 59 return 0; 60 } 61 62 ret = access_remote_vm(mm, addr, buf, len, gup_flags); 63 mmput(mm); 64 65 return ret; 66 } 67 68 69 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, 70 const struct cred *ptracer_cred) 71 { 72 BUG_ON(!list_empty(&child->ptrace_entry)); 73 list_add(&child->ptrace_entry, &new_parent->ptraced); 74 child->parent = new_parent; 75 child->ptracer_cred = get_cred(ptracer_cred); 76 } 77 78 /* 79 * ptrace a task: make the debugger its new parent and 80 * move it to the ptrace list. 81 * 82 * Must be called with the tasklist lock write-held. 83 */ 84 static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) 85 { 86 __ptrace_link(child, new_parent, current_cred()); 87 } 88 89 /** 90 * __ptrace_unlink - unlink ptracee and restore its execution state 91 * @child: ptracee to be unlinked 92 * 93 * Remove @child from the ptrace list, move it back to the original parent, 94 * and restore the execution state so that it conforms to the group stop 95 * state. 96 * 97 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer 98 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between 99 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. 100 * If the ptracer is exiting, the ptracee can be in any state. 101 * 102 * After detach, the ptracee should be in a state which conforms to the 103 * group stop. If the group is stopped or in the process of stopping, the 104 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken 105 * up from TASK_TRACED. 106 * 107 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, 108 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar 109 * to but in the opposite direction of what happens while attaching to a 110 * stopped task. However, in this direction, the intermediate RUNNING 111 * state is not hidden even from the current ptracer and if it immediately 112 * re-attaches and performs a WNOHANG wait(2), it may fail. 113 * 114 * CONTEXT: 115 * write_lock_irq(tasklist_lock) 116 */ 117 void __ptrace_unlink(struct task_struct *child) 118 { 119 const struct cred *old_cred; 120 BUG_ON(!child->ptrace); 121 122 clear_task_syscall_work(child, SYSCALL_TRACE); 123 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) 124 clear_task_syscall_work(child, SYSCALL_EMU); 125 #endif 126 127 child->parent = child->real_parent; 128 list_del_init(&child->ptrace_entry); 129 old_cred = child->ptracer_cred; 130 child->ptracer_cred = NULL; 131 put_cred(old_cred); 132 133 spin_lock(&child->sighand->siglock); 134 child->ptrace = 0; 135 /* 136 * Clear all pending traps and TRAPPING. TRAPPING should be 137 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly. 138 */ 139 task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK); 140 task_clear_jobctl_trapping(child); 141 142 /* 143 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and 144 * @child isn't dead. 145 */ 146 if (!(child->flags & PF_EXITING) && 147 (child->signal->flags & SIGNAL_STOP_STOPPED || 148 child->signal->group_stop_count)) 149 child->jobctl |= JOBCTL_STOP_PENDING; 150 151 /* 152 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick 153 * @child in the butt. Note that @resume should be used iff @child 154 * is in TASK_TRACED; otherwise, we might unduly disrupt 155 * TASK_KILLABLE sleeps. 156 */ 157 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) 158 ptrace_signal_wake_up(child, true); 159 160 spin_unlock(&child->sighand->siglock); 161 } 162 163 static bool looks_like_a_spurious_pid(struct task_struct *task) 164 { 165 if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP)) 166 return false; 167 168 if (task_pid_vnr(task) == task->ptrace_message) 169 return false; 170 /* 171 * The tracee changed its pid but the PTRACE_EVENT_EXEC event 172 * was not wait()'ed, most probably debugger targets the old 173 * leader which was destroyed in de_thread(). 174 */ 175 return true; 176 } 177 178 /* 179 * Ensure that nothing can wake it up, even SIGKILL 180 * 181 * A task is switched to this state while a ptrace operation is in progress; 182 * such that the ptrace operation is uninterruptible. 183 */ 184 static bool ptrace_freeze_traced(struct task_struct *task) 185 { 186 bool ret = false; 187 188 /* Lockless, nobody but us can set this flag */ 189 if (task->jobctl & JOBCTL_LISTENING) 190 return ret; 191 192 spin_lock_irq(&task->sighand->siglock); 193 if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && 194 !__fatal_signal_pending(task)) { 195 task->jobctl |= JOBCTL_PTRACE_FROZEN; 196 ret = true; 197 } 198 spin_unlock_irq(&task->sighand->siglock); 199 200 return ret; 201 } 202 203 static void ptrace_unfreeze_traced(struct task_struct *task) 204 { 205 unsigned long flags; 206 207 /* 208 * The child may be awake and may have cleared 209 * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will 210 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew. 211 */ 212 if (lock_task_sighand(task, &flags)) { 213 task->jobctl &= ~JOBCTL_PTRACE_FROZEN; 214 if (__fatal_signal_pending(task)) { 215 task->jobctl &= ~JOBCTL_TRACED; 216 wake_up_state(task, __TASK_TRACED); 217 } 218 unlock_task_sighand(task, &flags); 219 } 220 } 221 222 /** 223 * ptrace_check_attach - check whether ptracee is ready for ptrace operation 224 * @child: ptracee to check for 225 * @ignore_state: don't check whether @child is currently %TASK_TRACED 226 * 227 * Check whether @child is being ptraced by %current and ready for further 228 * ptrace operations. If @ignore_state is %false, @child also should be in 229 * %TASK_TRACED state and on return the child is guaranteed to be traced 230 * and not executing. If @ignore_state is %true, @child can be in any 231 * state. 232 * 233 * CONTEXT: 234 * Grabs and releases tasklist_lock and @child->sighand->siglock. 235 * 236 * RETURNS: 237 * 0 on success, -ESRCH if %child is not ready. 238 */ 239 static int ptrace_check_attach(struct task_struct *child, bool ignore_state) 240 { 241 int ret = -ESRCH; 242 243 /* 244 * We take the read lock around doing both checks to close a 245 * possible race where someone else was tracing our child and 246 * detached between these two checks. After this locked check, 247 * we are sure that this is our traced child and that can only 248 * be changed by us so it's not changing right after this. 249 */ 250 read_lock(&tasklist_lock); 251 if (child->ptrace && child->parent == current) { 252 /* 253 * child->sighand can't be NULL, release_task() 254 * does ptrace_unlink() before __exit_signal(). 255 */ 256 if (ignore_state || ptrace_freeze_traced(child)) 257 ret = 0; 258 } 259 read_unlock(&tasklist_lock); 260 261 if (!ret && !ignore_state && 262 WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN))) 263 ret = -ESRCH; 264 265 return ret; 266 } 267 268 static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode) 269 { 270 if (mode & PTRACE_MODE_NOAUDIT) 271 return ns_capable_noaudit(ns, CAP_SYS_PTRACE); 272 return ns_capable(ns, CAP_SYS_PTRACE); 273 } 274 275 /* Returns 0 on success, -errno on denial. */ 276 static int __ptrace_may_access(struct task_struct *task, unsigned int mode) 277 { 278 const struct cred *cred = current_cred(), *tcred; 279 struct mm_struct *mm; 280 kuid_t caller_uid; 281 kgid_t caller_gid; 282 283 if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) { 284 WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n"); 285 return -EPERM; 286 } 287 288 /* May we inspect the given task? 289 * This check is used both for attaching with ptrace 290 * and for allowing access to sensitive information in /proc. 291 * 292 * ptrace_attach denies several cases that /proc allows 293 * because setting up the necessary parent/child relationship 294 * or halting the specified task is impossible. 295 */ 296 297 /* Don't let security modules deny introspection */ 298 if (same_thread_group(task, current)) 299 return 0; 300 rcu_read_lock(); 301 if (mode & PTRACE_MODE_FSCREDS) { 302 caller_uid = cred->fsuid; 303 caller_gid = cred->fsgid; 304 } else { 305 /* 306 * Using the euid would make more sense here, but something 307 * in userland might rely on the old behavior, and this 308 * shouldn't be a security problem since 309 * PTRACE_MODE_REALCREDS implies that the caller explicitly 310 * used a syscall that requests access to another process 311 * (and not a filesystem syscall to procfs). 312 */ 313 caller_uid = cred->uid; 314 caller_gid = cred->gid; 315 } 316 tcred = __task_cred(task); 317 if (uid_eq(caller_uid, tcred->euid) && 318 uid_eq(caller_uid, tcred->suid) && 319 uid_eq(caller_uid, tcred->uid) && 320 gid_eq(caller_gid, tcred->egid) && 321 gid_eq(caller_gid, tcred->sgid) && 322 gid_eq(caller_gid, tcred->gid)) 323 goto ok; 324 if (ptrace_has_cap(tcred->user_ns, mode)) 325 goto ok; 326 rcu_read_unlock(); 327 return -EPERM; 328 ok: 329 rcu_read_unlock(); 330 /* 331 * If a task drops privileges and becomes nondumpable (through a syscall 332 * like setresuid()) while we are trying to access it, we must ensure 333 * that the dumpability is read after the credentials; otherwise, 334 * we may be able to attach to a task that we shouldn't be able to 335 * attach to (as if the task had dropped privileges without becoming 336 * nondumpable). 337 * Pairs with a write barrier in commit_creds(). 338 */ 339 smp_rmb(); 340 mm = task->mm; 341 if (mm && 342 ((get_dumpable(mm) != SUID_DUMP_USER) && 343 !ptrace_has_cap(mm->user_ns, mode))) 344 return -EPERM; 345 346 return security_ptrace_access_check(task, mode); 347 } 348 349 bool ptrace_may_access(struct task_struct *task, unsigned int mode) 350 { 351 int err; 352 task_lock(task); 353 err = __ptrace_may_access(task, mode); 354 task_unlock(task); 355 return !err; 356 } 357 358 static int check_ptrace_options(unsigned long data) 359 { 360 if (data & ~(unsigned long)PTRACE_O_MASK) 361 return -EINVAL; 362 363 if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { 364 if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || 365 !IS_ENABLED(CONFIG_SECCOMP)) 366 return -EINVAL; 367 368 if (!capable(CAP_SYS_ADMIN)) 369 return -EPERM; 370 371 if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || 372 current->ptrace & PT_SUSPEND_SECCOMP) 373 return -EPERM; 374 } 375 return 0; 376 } 377 378 static inline void ptrace_set_stopped(struct task_struct *task) 379 { 380 guard(spinlock)(&task->sighand->siglock); 381 382 /* 383 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and 384 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING 385 * will be cleared if the child completes the transition or any 386 * event which clears the group stop states happens. We'll wait 387 * for the transition to complete before returning from this 388 * function. 389 * 390 * This hides STOPPED -> RUNNING -> TRACED transition from the 391 * attaching thread but a different thread in the same group can 392 * still observe the transient RUNNING state. IOW, if another 393 * thread's WNOHANG wait(2) on the stopped tracee races against 394 * ATTACH, the wait(2) may fail due to the transient RUNNING. 395 * 396 * The following task_is_stopped() test is safe as both transitions 397 * in and out of STOPPED are protected by siglock. 398 */ 399 if (task_is_stopped(task) && 400 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) { 401 task->jobctl &= ~JOBCTL_STOPPED; 402 signal_wake_up_state(task, __TASK_STOPPED); 403 } 404 } 405 406 static int ptrace_attach(struct task_struct *task, long request, 407 unsigned long addr, 408 unsigned long flags) 409 { 410 bool seize = (request == PTRACE_SEIZE); 411 int retval; 412 413 if (seize) { 414 if (addr != 0) 415 return -EIO; 416 /* 417 * This duplicates the check in check_ptrace_options() because 418 * ptrace_attach() and ptrace_setoptions() have historically 419 * used different error codes for unknown ptrace options. 420 */ 421 if (flags & ~(unsigned long)PTRACE_O_MASK) 422 return -EIO; 423 424 retval = check_ptrace_options(flags); 425 if (retval) 426 return retval; 427 flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); 428 } else { 429 flags = PT_PTRACED; 430 } 431 432 audit_ptrace(task); 433 434 if (unlikely(task->flags & PF_KTHREAD)) 435 return -EPERM; 436 if (same_thread_group(task, current)) 437 return -EPERM; 438 439 /* 440 * Protect exec's credential calculations against our interference; 441 * SUID, SGID and LSM creds get determined differently 442 * under ptrace. 443 */ 444 scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR, 445 &task->signal->cred_guard_mutex) { 446 447 scoped_guard (task_lock, task) { 448 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS); 449 if (retval) 450 return retval; 451 } 452 453 scoped_guard (write_lock_irq, &tasklist_lock) { 454 if (unlikely(task->exit_state)) 455 return -EPERM; 456 if (task->ptrace) 457 return -EPERM; 458 459 task->ptrace = flags; 460 461 ptrace_link(task, current); 462 463 /* SEIZE doesn't trap tracee on attach */ 464 if (!seize) 465 send_sig_info(SIGSTOP, SEND_SIG_PRIV, task); 466 467 ptrace_set_stopped(task); 468 } 469 } 470 471 /* 472 * We do not bother to change retval or clear JOBCTL_TRAPPING 473 * if wait_on_bit() was interrupted by SIGKILL. The tracer will 474 * not return to user-mode, it will exit and clear this bit in 475 * __ptrace_unlink() if it wasn't already cleared by the tracee; 476 * and until then nobody can ptrace this task. 477 */ 478 wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE); 479 proc_ptrace_connector(task, PTRACE_ATTACH); 480 481 return 0; 482 } 483 484 /** 485 * ptrace_traceme -- helper for PTRACE_TRACEME 486 * 487 * Performs checks and sets PT_PTRACED. 488 * Should be used by all ptrace implementations for PTRACE_TRACEME. 489 */ 490 static int ptrace_traceme(void) 491 { 492 int ret = -EPERM; 493 494 write_lock_irq(&tasklist_lock); 495 /* Are we already being traced? */ 496 if (!current->ptrace) { 497 ret = security_ptrace_traceme(current->parent); 498 /* 499 * Check PF_EXITING to ensure ->real_parent has not passed 500 * exit_ptrace(). Otherwise we don't report the error but 501 * pretend ->real_parent untraces us right after return. 502 */ 503 if (!ret && !(current->real_parent->flags & PF_EXITING)) { 504 current->ptrace = PT_PTRACED; 505 ptrace_link(current, current->real_parent); 506 } 507 } 508 write_unlock_irq(&tasklist_lock); 509 510 return ret; 511 } 512 513 /* 514 * Called with irqs disabled, returns true if childs should reap themselves. 515 */ 516 static int ignoring_children(struct sighand_struct *sigh) 517 { 518 int ret; 519 spin_lock(&sigh->siglock); 520 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || 521 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); 522 spin_unlock(&sigh->siglock); 523 return ret; 524 } 525 526 /* 527 * Called with tasklist_lock held for writing. 528 * Unlink a traced task, and clean it up if it was a traced zombie. 529 * Return true if it needs to be reaped with release_task(). 530 * (We can't call release_task() here because we already hold tasklist_lock.) 531 * 532 * If it's a zombie, our attachedness prevented normal parent notification 533 * or self-reaping. Do notification now if it would have happened earlier. 534 * If it should reap itself, return true. 535 * 536 * If it's our own child, there is no notification to do. But if our normal 537 * children self-reap, then this child was prevented by ptrace and we must 538 * reap it now, in that case we must also wake up sub-threads sleeping in 539 * do_wait(). 540 */ 541 static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) 542 { 543 bool dead; 544 545 __ptrace_unlink(p); 546 547 if (p->exit_state != EXIT_ZOMBIE) 548 return false; 549 550 dead = !thread_group_leader(p); 551 552 if (!dead && thread_group_empty(p)) { 553 if (!same_thread_group(p->real_parent, tracer)) 554 dead = do_notify_parent(p, p->exit_signal); 555 else if (ignoring_children(tracer->sighand)) { 556 __wake_up_parent(p, tracer); 557 dead = true; 558 } 559 } 560 /* Mark it as in the process of being reaped. */ 561 if (dead) 562 p->exit_state = EXIT_DEAD; 563 return dead; 564 } 565 566 static int ptrace_detach(struct task_struct *child, unsigned int data) 567 { 568 if (!valid_signal(data)) 569 return -EIO; 570 571 /* Architecture-specific hardware disable .. */ 572 ptrace_disable(child); 573 574 write_lock_irq(&tasklist_lock); 575 /* 576 * We rely on ptrace_freeze_traced(). It can't be killed and 577 * untraced by another thread, it can't be a zombie. 578 */ 579 WARN_ON(!child->ptrace || child->exit_state); 580 /* 581 * tasklist_lock avoids the race with wait_task_stopped(), see 582 * the comment in ptrace_resume(). 583 */ 584 child->exit_code = data; 585 __ptrace_detach(current, child); 586 write_unlock_irq(&tasklist_lock); 587 588 proc_ptrace_connector(child, PTRACE_DETACH); 589 590 return 0; 591 } 592 593 /* 594 * Detach all tasks we were using ptrace on. Called with tasklist held 595 * for writing. 596 */ 597 void exit_ptrace(struct task_struct *tracer, struct list_head *dead) 598 { 599 struct task_struct *p, *n; 600 601 list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { 602 if (unlikely(p->ptrace & PT_EXITKILL)) 603 send_sig_info(SIGKILL, SEND_SIG_PRIV, p); 604 605 if (__ptrace_detach(tracer, p)) 606 list_add(&p->ptrace_entry, dead); 607 } 608 } 609 610 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) 611 { 612 int copied = 0; 613 614 while (len > 0) { 615 char buf[128]; 616 int this_len, retval; 617 618 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 619 retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE); 620 621 if (!retval) { 622 if (copied) 623 break; 624 return -EIO; 625 } 626 if (copy_to_user(dst, buf, retval)) 627 return -EFAULT; 628 copied += retval; 629 src += retval; 630 dst += retval; 631 len -= retval; 632 } 633 return copied; 634 } 635 636 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len) 637 { 638 int copied = 0; 639 640 while (len > 0) { 641 char buf[128]; 642 int this_len, retval; 643 644 this_len = (len > sizeof(buf)) ? sizeof(buf) : len; 645 if (copy_from_user(buf, src, this_len)) 646 return -EFAULT; 647 retval = ptrace_access_vm(tsk, dst, buf, this_len, 648 FOLL_FORCE | FOLL_WRITE); 649 if (!retval) { 650 if (copied) 651 break; 652 return -EIO; 653 } 654 copied += retval; 655 src += retval; 656 dst += retval; 657 len -= retval; 658 } 659 return copied; 660 } 661 662 static int ptrace_setoptions(struct task_struct *child, unsigned long data) 663 { 664 unsigned flags; 665 int ret; 666 667 ret = check_ptrace_options(data); 668 if (ret) 669 return ret; 670 671 /* Avoid intermediate state when all opts are cleared */ 672 flags = child->ptrace; 673 flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT); 674 flags |= (data << PT_OPT_FLAG_SHIFT); 675 child->ptrace = flags; 676 677 return 0; 678 } 679 680 static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info) 681 { 682 unsigned long flags; 683 int error = -ESRCH; 684 685 if (lock_task_sighand(child, &flags)) { 686 error = -EINVAL; 687 if (likely(child->last_siginfo != NULL)) { 688 copy_siginfo(info, child->last_siginfo); 689 error = 0; 690 } 691 unlock_task_sighand(child, &flags); 692 } 693 return error; 694 } 695 696 static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info) 697 { 698 unsigned long flags; 699 int error = -ESRCH; 700 701 if (lock_task_sighand(child, &flags)) { 702 error = -EINVAL; 703 if (likely(child->last_siginfo != NULL)) { 704 copy_siginfo(child->last_siginfo, info); 705 error = 0; 706 } 707 unlock_task_sighand(child, &flags); 708 } 709 return error; 710 } 711 712 static int ptrace_peek_siginfo(struct task_struct *child, 713 unsigned long addr, 714 unsigned long data) 715 { 716 struct ptrace_peeksiginfo_args arg; 717 struct sigpending *pending; 718 struct sigqueue *q; 719 int ret, i; 720 721 ret = copy_from_user(&arg, (void __user *) addr, 722 sizeof(struct ptrace_peeksiginfo_args)); 723 if (ret) 724 return -EFAULT; 725 726 if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED) 727 return -EINVAL; /* unknown flags */ 728 729 if (arg.nr < 0) 730 return -EINVAL; 731 732 /* Ensure arg.off fits in an unsigned long */ 733 if (arg.off > ULONG_MAX) 734 return 0; 735 736 if (arg.flags & PTRACE_PEEKSIGINFO_SHARED) 737 pending = &child->signal->shared_pending; 738 else 739 pending = &child->pending; 740 741 for (i = 0; i < arg.nr; ) { 742 kernel_siginfo_t info; 743 unsigned long off = arg.off + i; 744 bool found = false; 745 746 spin_lock_irq(&child->sighand->siglock); 747 list_for_each_entry(q, &pending->list, list) { 748 if (!off--) { 749 found = true; 750 copy_siginfo(&info, &q->info); 751 break; 752 } 753 } 754 spin_unlock_irq(&child->sighand->siglock); 755 756 if (!found) /* beyond the end of the list */ 757 break; 758 759 #ifdef CONFIG_COMPAT 760 if (unlikely(in_compat_syscall())) { 761 compat_siginfo_t __user *uinfo = compat_ptr(data); 762 763 if (copy_siginfo_to_user32(uinfo, &info)) { 764 ret = -EFAULT; 765 break; 766 } 767 768 } else 769 #endif 770 { 771 siginfo_t __user *uinfo = (siginfo_t __user *) data; 772 773 if (copy_siginfo_to_user(uinfo, &info)) { 774 ret = -EFAULT; 775 break; 776 } 777 } 778 779 data += sizeof(siginfo_t); 780 i++; 781 782 if (signal_pending(current)) 783 break; 784 785 cond_resched(); 786 } 787 788 if (i > 0) 789 return i; 790 791 return ret; 792 } 793 794 #ifdef CONFIG_RSEQ 795 static long ptrace_get_rseq_configuration(struct task_struct *task, 796 unsigned long size, void __user *data) 797 { 798 struct ptrace_rseq_configuration conf = { 799 .rseq_abi_pointer = (u64)(uintptr_t)task->rseq, 800 .rseq_abi_size = task->rseq_len, 801 .signature = task->rseq_sig, 802 .flags = 0, 803 }; 804 805 size = min_t(unsigned long, size, sizeof(conf)); 806 if (copy_to_user(data, &conf, size)) 807 return -EFAULT; 808 return sizeof(conf); 809 } 810 #endif 811 812 #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) 813 814 #ifdef PTRACE_SINGLEBLOCK 815 #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK) 816 #else 817 #define is_singleblock(request) 0 818 #endif 819 820 #ifdef PTRACE_SYSEMU 821 #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP) 822 #else 823 #define is_sysemu_singlestep(request) 0 824 #endif 825 826 static int ptrace_resume(struct task_struct *child, long request, 827 unsigned long data) 828 { 829 if (!valid_signal(data)) 830 return -EIO; 831 832 if (request == PTRACE_SYSCALL) 833 set_task_syscall_work(child, SYSCALL_TRACE); 834 else 835 clear_task_syscall_work(child, SYSCALL_TRACE); 836 837 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) 838 if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP) 839 set_task_syscall_work(child, SYSCALL_EMU); 840 else 841 clear_task_syscall_work(child, SYSCALL_EMU); 842 #endif 843 844 if (is_singleblock(request)) { 845 if (unlikely(!arch_has_block_step())) 846 return -EIO; 847 user_enable_block_step(child); 848 } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { 849 if (unlikely(!arch_has_single_step())) 850 return -EIO; 851 user_enable_single_step(child); 852 } else { 853 user_disable_single_step(child); 854 } 855 856 /* 857 * Change ->exit_code and ->state under siglock to avoid the race 858 * with wait_task_stopped() in between; a non-zero ->exit_code will 859 * wrongly look like another report from tracee. 860 * 861 * Note that we need siglock even if ->exit_code == data and/or this 862 * status was not reported yet, the new status must not be cleared by 863 * wait_task_stopped() after resume. 864 */ 865 spin_lock_irq(&child->sighand->siglock); 866 child->exit_code = data; 867 child->jobctl &= ~JOBCTL_TRACED; 868 wake_up_state(child, __TASK_TRACED); 869 spin_unlock_irq(&child->sighand->siglock); 870 871 return 0; 872 } 873 874 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 875 876 static const struct user_regset * 877 find_regset(const struct user_regset_view *view, unsigned int type) 878 { 879 const struct user_regset *regset; 880 int n; 881 882 for (n = 0; n < view->n; ++n) { 883 regset = view->regsets + n; 884 if (regset->core_note_type == type) 885 return regset; 886 } 887 888 return NULL; 889 } 890 891 static int ptrace_regset(struct task_struct *task, int req, unsigned int type, 892 struct iovec *kiov) 893 { 894 const struct user_regset_view *view = task_user_regset_view(task); 895 const struct user_regset *regset = find_regset(view, type); 896 int regset_no; 897 898 if (!regset || (kiov->iov_len % regset->size) != 0) 899 return -EINVAL; 900 901 regset_no = regset - view->regsets; 902 kiov->iov_len = min(kiov->iov_len, 903 (__kernel_size_t) (regset->n * regset->size)); 904 905 if (req == PTRACE_GETREGSET) 906 return copy_regset_to_user(task, view, regset_no, 0, 907 kiov->iov_len, kiov->iov_base); 908 else 909 return copy_regset_from_user(task, view, regset_no, 0, 910 kiov->iov_len, kiov->iov_base); 911 } 912 913 /* 914 * This is declared in linux/regset.h and defined in machine-dependent 915 * code. We put the export here, near the primary machine-neutral use, 916 * to ensure no machine forgets it. 917 */ 918 EXPORT_SYMBOL_GPL(task_user_regset_view); 919 920 static unsigned long 921 ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs, 922 struct ptrace_syscall_info *info) 923 { 924 unsigned long args[ARRAY_SIZE(info->entry.args)]; 925 int i; 926 927 info->op = PTRACE_SYSCALL_INFO_ENTRY; 928 info->entry.nr = syscall_get_nr(child, regs); 929 syscall_get_arguments(child, regs, args); 930 for (i = 0; i < ARRAY_SIZE(args); i++) 931 info->entry.args[i] = args[i]; 932 933 /* args is the last field in struct ptrace_syscall_info.entry */ 934 return offsetofend(struct ptrace_syscall_info, entry.args); 935 } 936 937 static unsigned long 938 ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs, 939 struct ptrace_syscall_info *info) 940 { 941 /* 942 * As struct ptrace_syscall_info.entry is currently a subset 943 * of struct ptrace_syscall_info.seccomp, it makes sense to 944 * initialize that subset using ptrace_get_syscall_info_entry(). 945 * This can be reconsidered in the future if these structures 946 * diverge significantly enough. 947 */ 948 ptrace_get_syscall_info_entry(child, regs, info); 949 info->op = PTRACE_SYSCALL_INFO_SECCOMP; 950 info->seccomp.ret_data = child->ptrace_message; 951 952 /* ret_data is the last field in struct ptrace_syscall_info.seccomp */ 953 return offsetofend(struct ptrace_syscall_info, seccomp.ret_data); 954 } 955 956 static unsigned long 957 ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs, 958 struct ptrace_syscall_info *info) 959 { 960 info->op = PTRACE_SYSCALL_INFO_EXIT; 961 info->exit.rval = syscall_get_error(child, regs); 962 info->exit.is_error = !!info->exit.rval; 963 if (!info->exit.is_error) 964 info->exit.rval = syscall_get_return_value(child, regs); 965 966 /* is_error is the last field in struct ptrace_syscall_info.exit */ 967 return offsetofend(struct ptrace_syscall_info, exit.is_error); 968 } 969 970 static int 971 ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size, 972 void __user *datavp) 973 { 974 struct pt_regs *regs = task_pt_regs(child); 975 struct ptrace_syscall_info info = { 976 .op = PTRACE_SYSCALL_INFO_NONE, 977 .arch = syscall_get_arch(child), 978 .instruction_pointer = instruction_pointer(regs), 979 .stack_pointer = user_stack_pointer(regs), 980 }; 981 unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry); 982 unsigned long write_size; 983 984 /* 985 * This does not need lock_task_sighand() to access 986 * child->last_siginfo because ptrace_freeze_traced() 987 * called earlier by ptrace_check_attach() ensures that 988 * the tracee cannot go away and clear its last_siginfo. 989 */ 990 switch (child->last_siginfo ? child->last_siginfo->si_code : 0) { 991 case SIGTRAP | 0x80: 992 switch (child->ptrace_message) { 993 case PTRACE_EVENTMSG_SYSCALL_ENTRY: 994 actual_size = ptrace_get_syscall_info_entry(child, regs, 995 &info); 996 break; 997 case PTRACE_EVENTMSG_SYSCALL_EXIT: 998 actual_size = ptrace_get_syscall_info_exit(child, regs, 999 &info); 1000 break; 1001 } 1002 break; 1003 case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8): 1004 actual_size = ptrace_get_syscall_info_seccomp(child, regs, 1005 &info); 1006 break; 1007 } 1008 1009 write_size = min(actual_size, user_size); 1010 return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size; 1011 } 1012 #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ 1013 1014 int ptrace_request(struct task_struct *child, long request, 1015 unsigned long addr, unsigned long data) 1016 { 1017 bool seized = child->ptrace & PT_SEIZED; 1018 int ret = -EIO; 1019 kernel_siginfo_t siginfo, *si; 1020 void __user *datavp = (void __user *) data; 1021 unsigned long __user *datalp = datavp; 1022 unsigned long flags; 1023 1024 switch (request) { 1025 case PTRACE_PEEKTEXT: 1026 case PTRACE_PEEKDATA: 1027 return generic_ptrace_peekdata(child, addr, data); 1028 case PTRACE_POKETEXT: 1029 case PTRACE_POKEDATA: 1030 return generic_ptrace_pokedata(child, addr, data); 1031 1032 #ifdef PTRACE_OLDSETOPTIONS 1033 case PTRACE_OLDSETOPTIONS: 1034 #endif 1035 case PTRACE_SETOPTIONS: 1036 ret = ptrace_setoptions(child, data); 1037 break; 1038 case PTRACE_GETEVENTMSG: 1039 ret = put_user(child->ptrace_message, datalp); 1040 break; 1041 1042 case PTRACE_PEEKSIGINFO: 1043 ret = ptrace_peek_siginfo(child, addr, data); 1044 break; 1045 1046 case PTRACE_GETSIGINFO: 1047 ret = ptrace_getsiginfo(child, &siginfo); 1048 if (!ret) 1049 ret = copy_siginfo_to_user(datavp, &siginfo); 1050 break; 1051 1052 case PTRACE_SETSIGINFO: 1053 ret = copy_siginfo_from_user(&siginfo, datavp); 1054 if (!ret) 1055 ret = ptrace_setsiginfo(child, &siginfo); 1056 break; 1057 1058 case PTRACE_GETSIGMASK: { 1059 sigset_t *mask; 1060 1061 if (addr != sizeof(sigset_t)) { 1062 ret = -EINVAL; 1063 break; 1064 } 1065 1066 if (test_tsk_restore_sigmask(child)) 1067 mask = &child->saved_sigmask; 1068 else 1069 mask = &child->blocked; 1070 1071 if (copy_to_user(datavp, mask, sizeof(sigset_t))) 1072 ret = -EFAULT; 1073 else 1074 ret = 0; 1075 1076 break; 1077 } 1078 1079 case PTRACE_SETSIGMASK: { 1080 sigset_t new_set; 1081 1082 if (addr != sizeof(sigset_t)) { 1083 ret = -EINVAL; 1084 break; 1085 } 1086 1087 if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) { 1088 ret = -EFAULT; 1089 break; 1090 } 1091 1092 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); 1093 1094 /* 1095 * Every thread does recalc_sigpending() after resume, so 1096 * retarget_shared_pending() and recalc_sigpending() are not 1097 * called here. 1098 */ 1099 spin_lock_irq(&child->sighand->siglock); 1100 child->blocked = new_set; 1101 spin_unlock_irq(&child->sighand->siglock); 1102 1103 clear_tsk_restore_sigmask(child); 1104 1105 ret = 0; 1106 break; 1107 } 1108 1109 case PTRACE_INTERRUPT: 1110 /* 1111 * Stop tracee without any side-effect on signal or job 1112 * control. At least one trap is guaranteed to happen 1113 * after this request. If @child is already trapped, the 1114 * current trap is not disturbed and another trap will 1115 * happen after the current trap is ended with PTRACE_CONT. 1116 * 1117 * The actual trap might not be PTRACE_EVENT_STOP trap but 1118 * the pending condition is cleared regardless. 1119 */ 1120 if (unlikely(!seized || !lock_task_sighand(child, &flags))) 1121 break; 1122 1123 /* 1124 * INTERRUPT doesn't disturb existing trap sans one 1125 * exception. If ptracer issued LISTEN for the current 1126 * STOP, this INTERRUPT should clear LISTEN and re-trap 1127 * tracee into STOP. 1128 */ 1129 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) 1130 ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); 1131 1132 unlock_task_sighand(child, &flags); 1133 ret = 0; 1134 break; 1135 1136 case PTRACE_LISTEN: 1137 /* 1138 * Listen for events. Tracee must be in STOP. It's not 1139 * resumed per-se but is not considered to be in TRACED by 1140 * wait(2) or ptrace(2). If an async event (e.g. group 1141 * stop state change) happens, tracee will enter STOP trap 1142 * again. Alternatively, ptracer can issue INTERRUPT to 1143 * finish listening and re-trap tracee into STOP. 1144 */ 1145 if (unlikely(!seized || !lock_task_sighand(child, &flags))) 1146 break; 1147 1148 si = child->last_siginfo; 1149 if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { 1150 child->jobctl |= JOBCTL_LISTENING; 1151 /* 1152 * If NOTIFY is set, it means event happened between 1153 * start of this trap and now. Trigger re-trap. 1154 */ 1155 if (child->jobctl & JOBCTL_TRAP_NOTIFY) 1156 ptrace_signal_wake_up(child, true); 1157 ret = 0; 1158 } 1159 unlock_task_sighand(child, &flags); 1160 break; 1161 1162 case PTRACE_DETACH: /* detach a process that was attached. */ 1163 ret = ptrace_detach(child, data); 1164 break; 1165 1166 #ifdef CONFIG_BINFMT_ELF_FDPIC 1167 case PTRACE_GETFDPIC: { 1168 struct mm_struct *mm = get_task_mm(child); 1169 unsigned long tmp = 0; 1170 1171 ret = -ESRCH; 1172 if (!mm) 1173 break; 1174 1175 switch (addr) { 1176 case PTRACE_GETFDPIC_EXEC: 1177 tmp = mm->context.exec_fdpic_loadmap; 1178 break; 1179 case PTRACE_GETFDPIC_INTERP: 1180 tmp = mm->context.interp_fdpic_loadmap; 1181 break; 1182 default: 1183 break; 1184 } 1185 mmput(mm); 1186 1187 ret = put_user(tmp, datalp); 1188 break; 1189 } 1190 #endif 1191 1192 case PTRACE_SINGLESTEP: 1193 #ifdef PTRACE_SINGLEBLOCK 1194 case PTRACE_SINGLEBLOCK: 1195 #endif 1196 #ifdef PTRACE_SYSEMU 1197 case PTRACE_SYSEMU: 1198 case PTRACE_SYSEMU_SINGLESTEP: 1199 #endif 1200 case PTRACE_SYSCALL: 1201 case PTRACE_CONT: 1202 return ptrace_resume(child, request, data); 1203 1204 case PTRACE_KILL: 1205 send_sig_info(SIGKILL, SEND_SIG_NOINFO, child); 1206 return 0; 1207 1208 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 1209 case PTRACE_GETREGSET: 1210 case PTRACE_SETREGSET: { 1211 struct iovec kiov; 1212 struct iovec __user *uiov = datavp; 1213 1214 if (!access_ok(uiov, sizeof(*uiov))) 1215 return -EFAULT; 1216 1217 if (__get_user(kiov.iov_base, &uiov->iov_base) || 1218 __get_user(kiov.iov_len, &uiov->iov_len)) 1219 return -EFAULT; 1220 1221 ret = ptrace_regset(child, request, addr, &kiov); 1222 if (!ret) 1223 ret = __put_user(kiov.iov_len, &uiov->iov_len); 1224 break; 1225 } 1226 1227 case PTRACE_GET_SYSCALL_INFO: 1228 ret = ptrace_get_syscall_info(child, addr, datavp); 1229 break; 1230 #endif 1231 1232 case PTRACE_SECCOMP_GET_FILTER: 1233 ret = seccomp_get_filter(child, addr, datavp); 1234 break; 1235 1236 case PTRACE_SECCOMP_GET_METADATA: 1237 ret = seccomp_get_metadata(child, addr, datavp); 1238 break; 1239 1240 #ifdef CONFIG_RSEQ 1241 case PTRACE_GET_RSEQ_CONFIGURATION: 1242 ret = ptrace_get_rseq_configuration(child, addr, datavp); 1243 break; 1244 #endif 1245 1246 case PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG: 1247 ret = syscall_user_dispatch_set_config(child, addr, datavp); 1248 break; 1249 1250 case PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG: 1251 ret = syscall_user_dispatch_get_config(child, addr, datavp); 1252 break; 1253 1254 default: 1255 break; 1256 } 1257 1258 return ret; 1259 } 1260 1261 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, 1262 unsigned long, data) 1263 { 1264 struct task_struct *child; 1265 long ret; 1266 1267 if (request == PTRACE_TRACEME) { 1268 ret = ptrace_traceme(); 1269 goto out; 1270 } 1271 1272 child = find_get_task_by_vpid(pid); 1273 if (!child) { 1274 ret = -ESRCH; 1275 goto out; 1276 } 1277 1278 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { 1279 ret = ptrace_attach(child, request, addr, data); 1280 goto out_put_task_struct; 1281 } 1282 1283 ret = ptrace_check_attach(child, request == PTRACE_KILL || 1284 request == PTRACE_INTERRUPT); 1285 if (ret < 0) 1286 goto out_put_task_struct; 1287 1288 ret = arch_ptrace(child, request, addr, data); 1289 if (ret || request != PTRACE_DETACH) 1290 ptrace_unfreeze_traced(child); 1291 1292 out_put_task_struct: 1293 put_task_struct(child); 1294 out: 1295 return ret; 1296 } 1297 1298 int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, 1299 unsigned long data) 1300 { 1301 unsigned long tmp; 1302 int copied; 1303 1304 copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE); 1305 if (copied != sizeof(tmp)) 1306 return -EIO; 1307 return put_user(tmp, (unsigned long __user *)data); 1308 } 1309 1310 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, 1311 unsigned long data) 1312 { 1313 int copied; 1314 1315 copied = ptrace_access_vm(tsk, addr, &data, sizeof(data), 1316 FOLL_FORCE | FOLL_WRITE); 1317 return (copied == sizeof(data)) ? 0 : -EIO; 1318 } 1319 1320 #if defined CONFIG_COMPAT 1321 1322 int compat_ptrace_request(struct task_struct *child, compat_long_t request, 1323 compat_ulong_t addr, compat_ulong_t data) 1324 { 1325 compat_ulong_t __user *datap = compat_ptr(data); 1326 compat_ulong_t word; 1327 kernel_siginfo_t siginfo; 1328 int ret; 1329 1330 switch (request) { 1331 case PTRACE_PEEKTEXT: 1332 case PTRACE_PEEKDATA: 1333 ret = ptrace_access_vm(child, addr, &word, sizeof(word), 1334 FOLL_FORCE); 1335 if (ret != sizeof(word)) 1336 ret = -EIO; 1337 else 1338 ret = put_user(word, datap); 1339 break; 1340 1341 case PTRACE_POKETEXT: 1342 case PTRACE_POKEDATA: 1343 ret = ptrace_access_vm(child, addr, &data, sizeof(data), 1344 FOLL_FORCE | FOLL_WRITE); 1345 ret = (ret != sizeof(data) ? -EIO : 0); 1346 break; 1347 1348 case PTRACE_GETEVENTMSG: 1349 ret = put_user((compat_ulong_t) child->ptrace_message, datap); 1350 break; 1351 1352 case PTRACE_GETSIGINFO: 1353 ret = ptrace_getsiginfo(child, &siginfo); 1354 if (!ret) 1355 ret = copy_siginfo_to_user32( 1356 (struct compat_siginfo __user *) datap, 1357 &siginfo); 1358 break; 1359 1360 case PTRACE_SETSIGINFO: 1361 ret = copy_siginfo_from_user32( 1362 &siginfo, (struct compat_siginfo __user *) datap); 1363 if (!ret) 1364 ret = ptrace_setsiginfo(child, &siginfo); 1365 break; 1366 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 1367 case PTRACE_GETREGSET: 1368 case PTRACE_SETREGSET: 1369 { 1370 struct iovec kiov; 1371 struct compat_iovec __user *uiov = 1372 (struct compat_iovec __user *) datap; 1373 compat_uptr_t ptr; 1374 compat_size_t len; 1375 1376 if (!access_ok(uiov, sizeof(*uiov))) 1377 return -EFAULT; 1378 1379 if (__get_user(ptr, &uiov->iov_base) || 1380 __get_user(len, &uiov->iov_len)) 1381 return -EFAULT; 1382 1383 kiov.iov_base = compat_ptr(ptr); 1384 kiov.iov_len = len; 1385 1386 ret = ptrace_regset(child, request, addr, &kiov); 1387 if (!ret) 1388 ret = __put_user(kiov.iov_len, &uiov->iov_len); 1389 break; 1390 } 1391 #endif 1392 1393 default: 1394 ret = ptrace_request(child, request, addr, data); 1395 } 1396 1397 return ret; 1398 } 1399 1400 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, 1401 compat_long_t, addr, compat_long_t, data) 1402 { 1403 struct task_struct *child; 1404 long ret; 1405 1406 if (request == PTRACE_TRACEME) { 1407 ret = ptrace_traceme(); 1408 goto out; 1409 } 1410 1411 child = find_get_task_by_vpid(pid); 1412 if (!child) { 1413 ret = -ESRCH; 1414 goto out; 1415 } 1416 1417 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { 1418 ret = ptrace_attach(child, request, addr, data); 1419 goto out_put_task_struct; 1420 } 1421 1422 ret = ptrace_check_attach(child, request == PTRACE_KILL || 1423 request == PTRACE_INTERRUPT); 1424 if (!ret) { 1425 ret = compat_arch_ptrace(child, request, addr, data); 1426 if (ret || request != PTRACE_DETACH) 1427 ptrace_unfreeze_traced(child); 1428 } 1429 1430 out_put_task_struct: 1431 put_task_struct(child); 1432 out: 1433 return ret; 1434 } 1435 #endif /* CONFIG_COMPAT */ 1436