// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>
#include <linux/syscall_user_dispatch.h>

#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
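
/*
 * For reference, this is the helper behind the word-sized PTRACE_PEEKDATA /
 * PTRACE_POKEDATA requests (see generic_ptrace_peekdata() below).  A minimal,
 * illustrative in-kernel use - assuming the caller already holds a reference
 * on @tsk and has passed the ptrace access checks - looks like:
 *
 *	unsigned long word;
 *
 *	if (ptrace_access_vm(tsk, addr, &word, sizeof(word), FOLL_FORCE)
 *	    != sizeof(word))
 *		return -EIO;
 *
 * The return value is the number of bytes actually copied, not an errno.
 */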

void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count))
		child->jobctl |= JOBCTL_STOP_PENDING;

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;
	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed, most probably debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}
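
/*
 * The exit_code checked above is what the tracer observes in the wait(2)
 * status for an exec event.  A user-space tracer typically matches it the
 * same way; a minimal, illustrative sketch (not part of this file's API):
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *		handle_exec_event(pid);		// hypothetical helper
 */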

/*
 * Ensure that nothing can wake it up, even SIGKILL
 *
 * A task is switched to this state while a ptrace operation is in progress;
 * such that the ptrace operation is uninterruptible.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	unsigned long flags;

	/*
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task)) {
			task->jobctl &= ~JOBCTL_TRACED;
			wake_up_state(task, __TASK_TRACED);
		}
		unlock_task_sighand(task, &flags);
	}
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state &&
	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
		ret = -ESRCH;

	return ret;
}

static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}
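
/*
 * __ptrace_may_access() below is the core of the ptrace permission model.
 * Kernel callers outside of ptrace (procfs being the typical user) go
 * through the ptrace_may_access() wrapper; an illustrative sketch of such
 * a call site - the surrounding function is hypothetical - is:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 *
 * PTRACE_MODE_*_FSCREDS is for filesystem-style access (procfs files),
 * PTRACE_MODE_*_REALCREDS for syscalls that name the target task directly.
 */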

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}
	return 0;
}
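
/*
 * The option bits validated above are the PTRACE_O_* flags a tracer passes
 * to PTRACE_SETOPTIONS (or as the fourth argument of PTRACE_SEIZE).  For
 * illustration only, a tracer commonly enables a set of them in one call:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXEC | PTRACE_O_EXITKILL);
 *
 * PTRACE_O_SUSPEND_SECCOMP is deliberately privileged: it is only meant for
 * checkpoint/restore and is rejected unless the caller has CAP_SYS_ADMIN
 * and is not itself confined by seccomp.
 */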

static inline void ptrace_set_stopped(struct task_struct *task, bool seize)
{
	guard(spinlock)(&task->sighand->siglock);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_signal_locked(SIGSTOP, SEND_SIG_PRIV, task, PIDTYPE_PID);
	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_STOPPED;
		signal_wake_up_state(task, __TASK_STOPPED);
	}
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	if (seize) {
		if (addr != 0)
			return -EIO;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			return -EIO;

		retval = check_ptrace_options(flags);
		if (retval)
			return retval;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	if (unlikely(task->flags & PF_KTHREAD))
		return -EPERM;
	if (same_thread_group(task, current))
		return -EPERM;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR,
			   &task->signal->cred_guard_mutex) {

		scoped_guard (task_lock, task) {
			retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
			if (retval)
				return retval;
		}

		scoped_guard (write_lock_irq, &tasklist_lock) {
			if (unlikely(task->exit_state))
				return -EPERM;
			if (task->ptrace)
				return -EPERM;

			task->ptrace = flags;
			ptrace_link(task, current);
			ptrace_set_stopped(task, seize);
		}
	}

	/*
	 * We do not bother to change retval or clear JOBCTL_TRAPPING
	 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
	 * not return to user-mode, it will exit and clear this bit in
	 * __ptrace_unlink() if it wasn't already cleared by the tracee;
	 * and until then nobody can ptrace this task.
	 */
	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
	proc_ptrace_connector(task, PTRACE_ATTACH);

	return 0;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
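
/*
 * The two attach paths above correspond to the two ways a user-space tracer
 * establishes tracing.  Purely as an illustration (error handling omitted):
 *
 *	// Debugger launches the tracee itself:
 *	if (fork() == 0) {
 *		ptrace(PTRACE_TRACEME, 0, 0, 0);
 *		execvp(argv[0], argv);		// stops with SIGTRAP at exec
 *	}
 *
 *	// ...or attaches to an already running process:
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);	// or PTRACE_SEIZE with options
 *	waitpid(pid, &status, 0);		// for ATTACH: wait for the stop
 */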

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}
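
/*
 * PT_EXITKILL above is the in-kernel mirror of the PTRACE_O_EXITKILL option:
 * if the tracer dies without detaching, every tracee that requested it gets
 * SIGKILL instead of being silently released.  An illustrative tracer-side
 * use (sandbox-style supervision) is simply:
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_O_EXITKILL);
 */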

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
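
/*
 * For reference, the user-space side of PTRACE_PEEKSIGINFO fills a
 * struct ptrace_peeksiginfo_args and receives an array of siginfo_t.
 * A minimal, illustrative call (no error handling) looks like:
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 4,
 *	};
 *	siginfo_t infos[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 *
 * The return value is the number of siginfo entries actually copied.
 */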

#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = task->rseq_len,
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif

#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 */
	spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	child->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(child, __TASK_TRACED);
	spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
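
/*
 * The regset interface above is what backs PTRACE_GETREGSET/SETREGSET.
 * Purely as an illustration, a tracer reads the general-purpose registers
 * by passing an ELF note type and an iovec; the kernel trims iov_len to
 * the real regset size:
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */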

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}
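
/*
 * These helpers feed PTRACE_GET_SYSCALL_INFO.  A tracer that stopped the
 * tracee at a syscall (PTRACE_SYSCALL plus PTRACE_O_TRACESYSGOOD) can ask
 * which side of the syscall it is on; an illustrative sketch:
 *
 *	struct ptrace_syscall_info info;
 *
 *	ptrace(PTRACE_GET_SYSCALL_INFO, pid, sizeof(info), &info);
 *	if (info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		printf("syscall %lld\n", (long long)info.entry.nr);
 *
 * The request returns the number of bytes available from the kernel, which
 * may exceed the size the caller passed in.
 */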

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;
	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

	case PTRACE_SINGLESTEP:
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
		return 0;

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	case PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_set_config(child, addr, datavp);
		break;

	case PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG:
		ret = syscall_user_dispatch_get_config(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}
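
/*
 * Putting the requests above together, the canonical user-space syscall
 * tracing loop (strace-style) served by this entry point looks roughly
 * like the following illustrative sketch (error handling omitted):
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_O_TRACESYSGOOD);
 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);
 *	for (;;) {
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *		// with TRACESYSGOOD, syscall stops report (SIGTRAP | 0x80)
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);   // run to next syscall stop
 *	}
 */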

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
				  FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
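
/*
 * Note for the word-sized helpers above: on the user-space side
 * PTRACE_PEEKDATA returns the peeked word itself, so -1 is a valid result.
 * The documented idiom - shown here only as an illustration - is to clear
 * errno before the call and check it afterwards:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (word == -1 && errno)
 *		perror("PTRACE_PEEKDATA");
 */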

#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				       FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				       FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */