// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}


void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that the resume wakeup should be used
	 * iff @child is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if @child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

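/*
 * Example (illustrative sketch, not part of this file): how an in-kernel
 * caller might use ptrace_may_access().  The helper below is hypothetical;
 * the point is that exactly one of PTRACE_MODE_FSCREDS or
 * PTRACE_MODE_REALCREDS must be passed, or __ptrace_may_access() above
 * WARNs and denies.  A filesystem-style interface (e.g. a procfs file)
 * would typically use the FSCREDS variant:
 *
 *	static int example_may_read_task(struct task_struct *task)
 *	{
 *		// introspection of another task's sensitive state
 *		if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *			return -EACCES;
 *		return 0;
 *	}
 */
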
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL.  The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

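/*
 * Example (illustrative sketch, not part of this file): a minimal userspace
 * view of the two attach flavours handled above.  PTRACE_ATTACH sends
 * SIGSTOP and the tracer must wait for the resulting stop; PTRACE_SEIZE
 * does not stop the tracee, takes the ptrace options in 'data', and
 * requires addr == 0.  Function names are made up and error handling is
 * omitted for brevity.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	static void attach_old_style(pid_t pid)
 *	{
 *		int status;
 *
 *		ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *		waitpid(pid, &status, 0);	// wait for the SIGSTOP stop
 *	}
 *
 *	static void attach_seize(pid_t pid)
 *	{
 *		// tracee keeps running; stop it explicitly if needed
 *		ptrace(PTRACE_SEIZE, pid, NULL,
 *		       (void *)(unsigned long)PTRACE_O_TRACESYSGOOD);
 *		ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *	}
 */
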
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

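/*
 * Example (illustrative sketch, not part of this file): the classic
 * userspace pattern that ends up in ptrace_traceme() above - the child
 * asks to be traced by its real parent and then execs; the parent picks
 * up the post-exec SIGTRAP stop with waitpid().  Error handling omitted.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			execvp(argv[1], &argv[1]);
 *			_exit(127);
 *		}
 *
 *		int status;
 *		waitpid(pid, &status, 0);	// stopped by SIGTRAP after execvp
 *		ptrace(PTRACE_CONT, pid, NULL, NULL);
 *		return 0;
 *	}
 */
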
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced().  It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

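/*
 * Example (illustrative sketch, not part of this file): detaching from
 * userspace.  The tracee normally has to be in a ptrace-stop; 'data' is
 * either 0 or a signal number delivered to the tracee as it resumes,
 * which is why ptrace_detach() validates it with valid_signal().  'pid'
 * is assumed to be an already-attached, stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	// resume the tracee untouched
 *	ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *
 *	// or resume it and deliver SIGTERM on the way out
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *)(unsigned long)SIGTERM);
 */
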
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
					  FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

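/*
 * Example (illustrative sketch, not part of this file): setting options
 * from userspace once the tracee is in a ptrace-stop.  The value lands in
 * ptrace_setoptions() above and is shifted into task->ptrace.
 *
 *	#include <sys/ptrace.h>
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD |	// SIGTRAP | 0x80 on syscall stops
 *			PTRACE_O_TRACEEXEC |	// stop at execve()
 *			PTRACE_O_EXITKILL));	// kill tracee if the tracer dies
 */
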
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_peek_siginfo(struct task_struct *child,
			       unsigned long addr,
			       unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
			     sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

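/*
 * Example (illustrative sketch, not part of this file): peeking at a
 * stopped tracee's pending signal queue from userspace.  'addr' points at
 * the args structure consumed by ptrace_peek_siginfo() above, 'data' at
 * the output array, and the return value is the number of siginfo records
 * copied.  struct ptrace_peeksiginfo_args comes from the UAPI
 * <linux/ptrace.h>; mixing that header with <sys/ptrace.h> may need care
 * depending on the libc.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off	= 0,	// start of the queue
 *		.flags	= 0,	// per-thread queue (no PTRACE_PEEKSIGINFO_SHARED)
 *		.nr	= 16,	// at most 16 entries
 *	};
 *	siginfo_t infos[16];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, infos);
 */
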
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

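/*
 * Example (illustrative sketch, not part of this file): a minimal
 * userspace syscall-tracing loop built on the resume path above.  It
 * assumes the tracee is already attached, currently stopped, and has
 * PTRACE_O_TRACESYSGOOD set, so syscall stops report SIGTRAP | 0x80 and
 * can be told apart from genuine SIGTRAPs.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <signal.h>
 *
 *	static void trace_syscalls(pid_t pid)
 *	{
 *		int status;
 *
 *		for (;;) {
 *			ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 *			if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *				break;
 *			if (WIFSTOPPED(status) &&
 *			    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
 *				// syscall entry or exit stop: inspect regs here
 *			}
 *		}
 *	}
 */
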
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);

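/*
 * Example (illustrative sketch, not part of this file): fetching the
 * general-purpose register set from userspace via the regset interface
 * implemented above.  'addr' selects the regset by its ELF note type and
 * 'data' points at a struct iovec; on return iov_len is trimmed to what
 * the kernel actually filled in.  The register structure shown is the
 * x86-64 one; other architectures use their own layout.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>	// struct user_regs_struct (x86-64)
 *	#include <elf.h>	// NT_PRSTATUS
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */
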
static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}

static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

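/*
 * Example (illustrative sketch, not part of this file): querying the
 * current syscall stop from userspace.  'addr' carries the size of the
 * buffer the tracer provides and the return value is the full size the
 * kernel wanted to write (actual_size above), so callers can detect
 * truncation.  The structure and the PTRACE_SYSCALL_INFO_* constants come
 * from the UAPI <linux/ptrace.h> (kernel 5.3+); older libcs may not expose
 * the request.
 *
 *	#include <sys/ptrace.h>
 *
 *	struct ptrace_syscall_info info;
 *	long size = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *			   (void *)sizeof(info), &info);
 *
 *	if (size > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		;	// info.entry.nr / info.entry.args[] describe the syscall
 */
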
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

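/*
 * Example (illustrative sketch, not part of this file): how a seized
 * tracer typically uses the PTRACE_INTERRUPT/PTRACE_LISTEN pair handled
 * above - stop the tracee, inspect it, then wait for further events
 * without keeping it frozen inside a group-stop.  'pid' is assumed to
 * have been attached with PTRACE_SEIZE.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// PTRACE_EVENT_STOP trap
 *
 *	// if this stop reports a group-stop, LISTEN lets the tracee stay
 *	// stopped for job control while the tracer still gets async events
 *	ptrace(PTRACE_LISTEN, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// next event re-traps into STOP
 */
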
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

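/*
 * Example (illustrative sketch, not part of this file): the word-sized
 * transfers that end up in generic_ptrace_peekdata()/pokedata() above.
 * With the libc wrapper, the peeked word is the return value, and since a
 * valid word can look like -1 the caller has to clear errno first.
 * 'remote_addr' is a placeholder address in the tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)remote_addr, NULL);
 *	if (word == -1 && errno)
 *		;	// real error, not a -1 value in the tracee
 *
 *	ptrace(PTRACE_POKEDATA, pid, (void *)remote_addr, (void *)word);
 */
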
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */