// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>

#include <uapi/linux/wait.h>

#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname = "oops_limit",
		.data = &oops_limit,
		.maxlen = sizeof(oops_limit),
		.mode = 0644,
		.proc_handler = proc_douintvec,
	},
	{ }
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif
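
/*
 * Illustrative note (not part of the original source): with the registrations
 * above, these knobs are normally reachable from userspace as the
 * "kernel.oops_limit" sysctl and the read-only /sys/kernel/oops_count file,
 * for example:
 *
 *	$ sysctl kernel.oops_limit
 *	$ cat /sys/kernel/oops_count
 *	# sysctl -w kernel.oops_limit=0
 *
 * The paths and the meaning of a zero limit (no panic on repeated oopses) are
 * inferred from the register_sysctl_init()/sysfs_add_file_to_group() calls
 * above and from the "limit && ..." check in make_task_dead() below.
 */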

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current   [S] cond = true
	 *        MB (A)              MB (B)
	 *    [L] cond            [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
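
/*
 * Illustrative sketch (not from the original source): the wake side above
 * pairs with a waiter built on rcuwait_wait_event() from <linux/rcuwait.h>.
 * A typical pairing looks roughly like this, where "done" stands in for
 * whatever condition the caller is waiting on:
 *
 *	WAIT side					WAKE side
 *	rcuwait_wait_event(&w, READ_ONCE(done),		WRITE_ONCE(done, true);
 *			   TASK_UNINTERRUPTIBLE);	rcuwait_wake_up(&w);
 *
 * The set_current_state() inside rcuwait_wait_event() provides barrier (A)
 * and the smp_mb() above provides barrier (B) from the diagram.
 */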

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);

	/* The vhost_worker does not participate in coredumps */
	if (core_state &&
	    ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	lru_gen_migrate_mm(mm);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}
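
/*
 * Illustrative sketch (not part of the original source): the "child
 * subreaper" case above is what a service manager typically opts into from
 * userspace with prctl(2), e.g.:
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *
 * after which orphaned descendants in its pid namespace are reparented to it
 * instead of to init, as described in the comment before find_new_reaper().
 */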

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */

	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	exit_task_stack_account(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
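
/*
 * Illustrative note (not from the original source): the "(error_code & 0xff)
 * << 8" above packs the low byte of the exit code into the classic wait
 * status word.  wait_task_zombie() below unpacks the same layout: a status
 * with (status & 0x7f) == 0 is a normal exit whose code is status >> 8,
 * otherwise the low bits name the fatal signal and bit 0x80 flags a core
 * dump.  Userspace sees this through the WIFEXITED()/WEXITSTATUS() family of
 * macros, so a process calling exit(3) is reported with WEXITSTATUS() == 3.
 */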

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
			? p->signal->group_exit_code : p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		write_seqlock_irq(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock_irq(&psig->stats_lock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.
		 * If not, subset of WSTOPPED|WCONTINUED is set, so, if there
		 * are live subthreads, there are events to wait for.  If all
		 * subthreads are dead, it's still safe to clear - this
		 * function will be called again in a finite amount of time
		 * once all the subthreads are released and will then return
		 * without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return false;

	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
		return false;

	return true;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (pid_child_should_wake(wo, p))
		return default_wake_function(wait, mode, sync, key);

	return 0;
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
				 struct task_struct *target)
{
	struct task_struct *parent =
		!ptrace ? target->real_parent : target->parent;

	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
				     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID.
 * No need to iterate through child and tracee lists to find the target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
	bool ptrace;
	struct task_struct *target;
	int retval;

	ptrace = false;
	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
	if (target && is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	ptrace = true;
	target = pid_task(wo->wo_pid, PIDTYPE_PID);
	if (target && target->ptrace &&
	    is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	return 0;
}

long __do_wait(struct wait_opts *wo)
{
	long retval;

	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	read_lock(&tasklist_lock);

	if (wo->wo_type == PIDTYPE_PID) {
		retval = do_wait_pid(wo);
		if (retval)
			return retval;
	} else {
		struct task_struct *tsk = current;

		do {
			retval = do_wait_thread(wo, tsk);
			if (retval)
				return retval;

			retval = ptrace_do_wait(wo, tsk);
			if (retval)
				return retval;

			if (wo->wo_flags & __WNOTHREAD)
				break;
		} while_each_thread(current, tsk);
	}
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG))
		return -ERESTARTSYS;

	return retval;
}

static long do_wait(struct wait_opts *wo)
{
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		retval = __do_wait(wo);
		if (retval != -ERESTARTSYS)
			break;
		if (signal_pending(current))
			break;
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
			  struct waitid_info *infop, int options,
			  struct rusage *ru)
{
	unsigned int f_flags = 0;
	struct pid *pid = NULL;
	enum pid_type type;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo->wo_type = type;
	wo->wo_pid = pid;
	wo->wo_flags = options;
	wo->wo_info = infop;
	wo->wo_rusage = ru;
	if (f_flags & O_NONBLOCK)
		wo->wo_flags |= WNOHANG;

	return 0;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	long ret;

	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
	if (ret)
		return ret;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
		ret = -EAGAIN;

	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
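
/*
 * Illustrative sketch (not part of the original source): from userspace the
 * syscall above is normally reached through the waitid(2) wrapper, e.g.
 * reaping one specific child without blocking:
 *
 *	siginfo_t info = { 0 };
 *
 *	if (waitid(P_PID, child_pid, &info, WEXITED | WNOHANG) == 0 &&
 *	    info.si_pid == child_pid) {
 *		// info.si_code is CLD_EXITED/CLD_KILLED/CLD_DUMPED and
 *		// info.si_status carries the exit code or signal number,
 *		// matching the waitid_info fields filled in above.
 *	}
 *
 * "child_pid" is a placeholder for a pid the caller actually owns.
 */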

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options | WEXITED;
	wo.wo_info = NULL;
	wo.wo_stat = 0;
	wo.wo_rusage = ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited.  false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

/*
 * This needs to be __function_aligned as GCC implicitly makes any
 * implementation of abort() cold and drops alignment specified by
 * -falign-functions=N.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
 */
__weak __function_aligned void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);