// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname	= "oops_limit",
		.data		= &oops_limit,
		.maxlen		= sizeof(oops_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif
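
/*
 * Illustrative usage note (not part of the original file): the knob and
 * counter above surface to userspace as
 *
 *	/proc/sys/kernel/oops_limit	(read-write, default 10000)
 *	/sys/kernel/oops_count		(read-only)
 *
 * and setting kernel.oops_limit to 0 disables the panic-on-repeated-oops
 * check in make_task_dead() below.
 */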

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
	    && leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake_up() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current   [S] cond = true
	 *        MB (A)              MB (B)
	 *    [L] cond            [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
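
/*
 * A minimal sketch of the waiter side this pairs with (illustrative only,
 * not from this file; the real API lives in <linux/rcuwait.h>):
 *
 *	struct rcuwait w;
 *
 *	rcuwait_init(&w);
 *	rcuwait_wait_event(&w, READ_ONCE(cond), TASK_UNINTERRUPTIBLE);
 *
 * rcuwait_wait_event() publishes current through w->task and issues the
 * set_current_state() barrier (A) that the smp_mb() (B) above pairs with.
 */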

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);

	/* The vhost_worker does not participate in coredumps */
	if (core_state &&
	    ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	lru_gen_migrate_mm(mm);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */
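
/*
 * Hedged aside (not part of the original file): mm->owner exists only
 * under CONFIG_MEMCG; memcg resolves the cgroup a shared mm is charged to
 * through the owner task, which is why an exiting owner hands the mm off
 * above instead of leaving a dangling pointer behind.
 */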

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}
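
/*
 * Hedged aside (not part of the original file): case 2 above is armed from
 * userspace with prctl(PR_SET_CHILD_SUBREAPER, 1), which is how service
 * managers arrange to collect orphaned descendants instead of pid 1.
 */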

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}
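
/*
 * Hedged aside (not part of the original file): t->pdeath_signal used
 * above is armed with prctl(PR_SET_PDEATHSIG, sig); it fires when the real
 * parent dies and the task is handed to a new reaper, not when the reaper
 * itself later exits.
 */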

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	validate_creds_for_do_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a usable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);
	exit_task_stack_account(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
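
/*
 * Illustrative note (not part of the original file): the "<< 8" above
 * builds the traditional wait status word: bits 8-15 carry the exit code
 * (WEXITSTATUS), bits 0-6 a terminating signal, and bit 7 the core-dump
 * flag tested via "status & 0x80" in wait_task_zombie(). exit(1) from
 * userspace therefore arrives here as do_exit(0x100).
 */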

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}
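
/*
 * Illustrative note (not part of the original file): a "clone" child in
 * the sense above is one created e.g. via clone(2) with an exit signal
 * other than SIGCHLD; plain wait()/waitpid() skips such children unless
 * __WCLONE or __WALL is passed.
 */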

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
			? p->signal->group_exit_code : p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ?
				CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}
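
/*
 * Illustrative note (not part of the original file): the status word built
 * above follows the classic wait(2) layout: a stopped child reports
 * (stopsig << 8) | 0x7f, which is what WIFSTOPPED()/WSTOPSIG() decode,
 * while wait_task_continued() below reports the fixed value 0xffff that
 * WIFCONTINUED() tests for.
 */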

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.
		 * If not, subset of WSTOPPED|WCONTINUED is set, so, if
		 * there are live subthreads, there are events to wait for.
		 * If all subthreads are dead, it's still safe to clear -
		 * this function will be called again in a finite amount of
		 * time once all the subthreads are released and will then
		 * return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return false;

	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
		return false;

	return true;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (pid_child_should_wake(wo, p))
		return default_wake_function(wait, mode, sync, key);

	return 0;
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
				 struct task_struct *target)
{
	struct task_struct *parent =
		!ptrace ? target->real_parent : target->parent;

	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
				     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID.
 * No need to iterate through child and tracee lists to find the
 * target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
	bool ptrace;
	struct task_struct *target;
	int retval;

	ptrace = false;
	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
	if (target && is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	ptrace = true;
	target = pid_task(wo->wo_pid, PIDTYPE_PID);
	if (target && target->ptrace &&
	    is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	return 0;
}

long __do_wait(struct wait_opts *wo)
{
	long retval;

	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	read_lock(&tasklist_lock);

	if (wo->wo_type == PIDTYPE_PID) {
		retval = do_wait_pid(wo);
		if (retval)
			return retval;
	} else {
		struct task_struct *tsk = current;

		do {
			retval = do_wait_thread(wo, tsk);
			if (retval)
				return retval;

			retval = ptrace_do_wait(wo, tsk);
			if (retval)
				return retval;

			if (wo->wo_flags & __WNOTHREAD)
				break;
		} while_each_thread(current, tsk);
	}
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG))
		return -ERESTARTSYS;

	return retval;
}

static long do_wait(struct wait_opts *wo)
{
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		retval = __do_wait(wo);
		if (retval != -ERESTARTSYS)
			break;
		if (signal_pending(current))
			break;
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
			  struct waitid_info *infop, int options,
			  struct rusage *ru)
{
	unsigned int f_flags = 0;
	struct pid *pid = NULL;
	enum pid_type type;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo->wo_type	= type;
	wo->wo_pid	= pid;
	wo->wo_flags	= options;
	wo->wo_info	= infop;
	wo->wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo->wo_flags |= WNOHANG;

	return 0;
}
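
/*
 * Illustrative note (not part of the original file): for P_PIDFD the pid
 * is resolved from a pidfd, and a pidfd opened with O_NONBLOCK makes the
 * wait non-blocking by implying WNOHANG above; kernel_waitid() below turns
 * the resulting "nothing to reap yet" into -EAGAIN when the caller did not
 * pass WNOHANG itself.
 */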

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	long ret;

	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
	if (ret)
		return ret;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
		ret = -EAGAIN;

	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}
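
/*
 * Illustrative summary (not part of the original file) of the pid argument
 * conventions kernel_wait4() implements above:
 *
 *	upid == -1	wait for any child
 *	upid < -1	wait for any child in process group -upid
 *	upid == 0	wait for any child in the caller's process group
 *	upid > 0	wait for the child with exactly that pid
 */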

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited. false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

/*
 * This needs to be __function_aligned as GCC implicitly makes any
 * implementation of abort() cold and drops alignment specified by
 * -falign-functions=N.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
 */
__weak __function_aligned void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);