1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/exit.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 #include <linux/mm.h> 9 #include <linux/slab.h> 10 #include <linux/sched/autogroup.h> 11 #include <linux/sched/mm.h> 12 #include <linux/sched/stat.h> 13 #include <linux/sched/task.h> 14 #include <linux/sched/task_stack.h> 15 #include <linux/sched/cputime.h> 16 #include <linux/interrupt.h> 17 #include <linux/module.h> 18 #include <linux/capability.h> 19 #include <linux/completion.h> 20 #include <linux/personality.h> 21 #include <linux/tty.h> 22 #include <linux/iocontext.h> 23 #include <linux/key.h> 24 #include <linux/cpu.h> 25 #include <linux/acct.h> 26 #include <linux/tsacct_kern.h> 27 #include <linux/file.h> 28 #include <linux/fdtable.h> 29 #include <linux/freezer.h> 30 #include <linux/binfmts.h> 31 #include <linux/nsproxy.h> 32 #include <linux/pid_namespace.h> 33 #include <linux/ptrace.h> 34 #include <linux/profile.h> 35 #include <linux/mount.h> 36 #include <linux/proc_fs.h> 37 #include <linux/kthread.h> 38 #include <linux/mempolicy.h> 39 #include <linux/taskstats_kern.h> 40 #include <linux/delayacct.h> 41 #include <linux/cgroup.h> 42 #include <linux/syscalls.h> 43 #include <linux/signal.h> 44 #include <linux/posix-timers.h> 45 #include <linux/cn_proc.h> 46 #include <linux/mutex.h> 47 #include <linux/futex.h> 48 #include <linux/pipe_fs_i.h> 49 #include <linux/audit.h> /* for audit_free() */ 50 #include <linux/resource.h> 51 #include <linux/task_io_accounting_ops.h> 52 #include <linux/blkdev.h> 53 #include <linux/task_work.h> 54 #include <linux/fs_struct.h> 55 #include <linux/init_task.h> 56 #include <linux/perf_event.h> 57 #include <trace/events/sched.h> 58 #include <linux/hw_breakpoint.h> 59 #include <linux/oom.h> 60 #include <linux/writeback.h> 61 #include <linux/shm.h> 62 #include <linux/kcov.h> 63 #include <linux/kmsan.h> 64 #include <linux/random.h> 65 #include <linux/rcuwait.h> 66 #include <linux/compat.h> 67 #include <linux/io_uring.h> 68 #include <linux/kprobes.h> 69 #include <linux/rethook.h> 70 #include <linux/sysfs.h> 71 #include <linux/user_events.h> 72 #include <linux/uaccess.h> 73 74 #include <uapi/linux/wait.h> 75 76 #include <asm/unistd.h> 77 #include <asm/mmu_context.h> 78 79 #include "exit.h" 80 81 /* 82 * The default value should be high enough to not crash a system that randomly 83 * crashes its kernel from time to time, but low enough to at least not permit 84 * overflowing 32-bit refcounts or the ldsem writer count. 85 */ 86 static unsigned int oops_limit = 10000; 87 88 #ifdef CONFIG_SYSCTL 89 static struct ctl_table kern_exit_table[] = { 90 { 91 .procname = "oops_limit", 92 .data = &oops_limit, 93 .maxlen = sizeof(oops_limit), 94 .mode = 0644, 95 .proc_handler = proc_douintvec, 96 }, 97 }; 98 99 static __init int kernel_exit_sysctls_init(void) 100 { 101 register_sysctl_init("kernel", kern_exit_table); 102 return 0; 103 } 104 late_initcall(kernel_exit_sysctls_init); 105 #endif 106 107 static atomic_t oops_count = ATOMIC_INIT(0); 108 109 #ifdef CONFIG_SYSFS 110 static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr, 111 char *page) 112 { 113 return sysfs_emit(page, "%d\n", atomic_read(&oops_count)); 114 } 115 116 static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count); 117 118 static __init int kernel_exit_sysfs_init(void) 119 { 120 sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL); 121 return 0; 122 } 123 late_initcall(kernel_exit_sysfs_init); 124 #endif 125 126 static void __unhash_process(struct task_struct *p, bool group_dead) 127 { 128 nr_threads--; 129 detach_pid(p, PIDTYPE_PID); 130 if (group_dead) { 131 detach_pid(p, PIDTYPE_TGID); 132 detach_pid(p, PIDTYPE_PGID); 133 detach_pid(p, PIDTYPE_SID); 134 135 list_del_rcu(&p->tasks); 136 list_del_init(&p->sibling); 137 __this_cpu_dec(process_counts); 138 } 139 list_del_rcu(&p->thread_node); 140 } 141 142 /* 143 * This function expects the tasklist_lock write-locked. 144 */ 145 static void __exit_signal(struct task_struct *tsk) 146 { 147 struct signal_struct *sig = tsk->signal; 148 bool group_dead = thread_group_leader(tsk); 149 struct sighand_struct *sighand; 150 struct tty_struct *tty; 151 u64 utime, stime; 152 153 sighand = rcu_dereference_check(tsk->sighand, 154 lockdep_tasklist_lock_is_held()); 155 spin_lock(&sighand->siglock); 156 157 #ifdef CONFIG_POSIX_TIMERS 158 posix_cpu_timers_exit(tsk); 159 if (group_dead) 160 posix_cpu_timers_exit_group(tsk); 161 #endif 162 163 if (group_dead) { 164 tty = sig->tty; 165 sig->tty = NULL; 166 } else { 167 /* 168 * If there is any task waiting for the group exit 169 * then notify it: 170 */ 171 if (sig->notify_count > 0 && !--sig->notify_count) 172 wake_up_process(sig->group_exec_task); 173 174 if (tsk == sig->curr_target) 175 sig->curr_target = next_thread(tsk); 176 } 177 178 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, 179 sizeof(unsigned long long)); 180 181 /* 182 * Accumulate here the counters for all threads as they die. We could 183 * skip the group leader because it is the last user of signal_struct, 184 * but we want to avoid the race with thread_group_cputime() which can 185 * see the empty ->thread_head list. 186 */ 187 task_cputime(tsk, &utime, &stime); 188 write_seqlock(&sig->stats_lock); 189 sig->utime += utime; 190 sig->stime += stime; 191 sig->gtime += task_gtime(tsk); 192 sig->min_flt += tsk->min_flt; 193 sig->maj_flt += tsk->maj_flt; 194 sig->nvcsw += tsk->nvcsw; 195 sig->nivcsw += tsk->nivcsw; 196 sig->inblock += task_io_get_inblock(tsk); 197 sig->oublock += task_io_get_oublock(tsk); 198 task_io_accounting_add(&sig->ioac, &tsk->ioac); 199 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 200 sig->nr_threads--; 201 __unhash_process(tsk, group_dead); 202 write_sequnlock(&sig->stats_lock); 203 204 /* 205 * Do this under ->siglock, we can race with another thread 206 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 207 */ 208 flush_sigqueue(&tsk->pending); 209 tsk->sighand = NULL; 210 spin_unlock(&sighand->siglock); 211 212 __cleanup_sighand(sighand); 213 clear_tsk_thread_flag(tsk, TIF_SIGPENDING); 214 if (group_dead) { 215 flush_sigqueue(&sig->shared_pending); 216 tty_kref_put(tty); 217 } 218 } 219 220 static void delayed_put_task_struct(struct rcu_head *rhp) 221 { 222 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 223 224 kprobe_flush_task(tsk); 225 rethook_flush_task(tsk); 226 perf_event_delayed_put(tsk); 227 trace_sched_process_free(tsk); 228 put_task_struct(tsk); 229 } 230 231 void put_task_struct_rcu_user(struct task_struct *task) 232 { 233 if (refcount_dec_and_test(&task->rcu_users)) 234 call_rcu(&task->rcu, delayed_put_task_struct); 235 } 236 237 void __weak release_thread(struct task_struct *dead_task) 238 { 239 } 240 241 void release_task(struct task_struct *p) 242 { 243 struct task_struct *leader; 244 struct pid *thread_pid; 245 int zap_leader; 246 repeat: 247 /* don't need to get the RCU readlock here - the process is dead and 248 * can't be modifying its own credentials. But shut RCU-lockdep up */ 249 rcu_read_lock(); 250 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); 251 rcu_read_unlock(); 252 253 cgroup_release(p); 254 255 write_lock_irq(&tasklist_lock); 256 ptrace_release_task(p); 257 thread_pid = get_pid(p->thread_pid); 258 __exit_signal(p); 259 260 /* 261 * If we are the last non-leader member of the thread 262 * group, and the leader is zombie, then notify the 263 * group leader's parent process. (if it wants notification.) 264 */ 265 zap_leader = 0; 266 leader = p->group_leader; 267 if (leader != p && thread_group_empty(leader) 268 && leader->exit_state == EXIT_ZOMBIE) { 269 /* 270 * If we were the last child thread and the leader has 271 * exited already, and the leader's parent ignores SIGCHLD, 272 * then we are the one who should release the leader. 273 */ 274 zap_leader = do_notify_parent(leader, leader->exit_signal); 275 if (zap_leader) 276 leader->exit_state = EXIT_DEAD; 277 } 278 279 write_unlock_irq(&tasklist_lock); 280 seccomp_filter_release(p); 281 proc_flush_pid(thread_pid); 282 put_pid(thread_pid); 283 release_thread(p); 284 put_task_struct_rcu_user(p); 285 286 p = leader; 287 if (unlikely(zap_leader)) 288 goto repeat; 289 } 290 291 int rcuwait_wake_up(struct rcuwait *w) 292 { 293 int ret = 0; 294 struct task_struct *task; 295 296 rcu_read_lock(); 297 298 /* 299 * Order condition vs @task, such that everything prior to the load 300 * of @task is visible. This is the condition as to why the user called 301 * rcuwait_wake() in the first place. Pairs with set_current_state() 302 * barrier (A) in rcuwait_wait_event(). 303 * 304 * WAIT WAKE 305 * [S] tsk = current [S] cond = true 306 * MB (A) MB (B) 307 * [L] cond [L] tsk 308 */ 309 smp_mb(); /* (B) */ 310 311 task = rcu_dereference(w->task); 312 if (task) 313 ret = wake_up_process(task); 314 rcu_read_unlock(); 315 316 return ret; 317 } 318 EXPORT_SYMBOL_GPL(rcuwait_wake_up); 319 320 /* 321 * Determine if a process group is "orphaned", according to the POSIX 322 * definition in 2.2.2.52. Orphaned process groups are not to be affected 323 * by terminal-generated stop signals. Newly orphaned process groups are 324 * to receive a SIGHUP and a SIGCONT. 325 * 326 * "I ask you, have you ever known what it is to be an orphan?" 327 */ 328 static int will_become_orphaned_pgrp(struct pid *pgrp, 329 struct task_struct *ignored_task) 330 { 331 struct task_struct *p; 332 333 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 334 if ((p == ignored_task) || 335 (p->exit_state && thread_group_empty(p)) || 336 is_global_init(p->real_parent)) 337 continue; 338 339 if (task_pgrp(p->real_parent) != pgrp && 340 task_session(p->real_parent) == task_session(p)) 341 return 0; 342 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 343 344 return 1; 345 } 346 347 int is_current_pgrp_orphaned(void) 348 { 349 int retval; 350 351 read_lock(&tasklist_lock); 352 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); 353 read_unlock(&tasklist_lock); 354 355 return retval; 356 } 357 358 static bool has_stopped_jobs(struct pid *pgrp) 359 { 360 struct task_struct *p; 361 362 do_each_pid_task(pgrp, PIDTYPE_PGID, p) { 363 if (p->signal->flags & SIGNAL_STOP_STOPPED) 364 return true; 365 } while_each_pid_task(pgrp, PIDTYPE_PGID, p); 366 367 return false; 368 } 369 370 /* 371 * Check to see if any process groups have become orphaned as 372 * a result of our exiting, and if they have any stopped jobs, 373 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 374 */ 375 static void 376 kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) 377 { 378 struct pid *pgrp = task_pgrp(tsk); 379 struct task_struct *ignored_task = tsk; 380 381 if (!parent) 382 /* exit: our father is in a different pgrp than 383 * we are and we were the only connection outside. 384 */ 385 parent = tsk->real_parent; 386 else 387 /* reparent: our child is in a different pgrp than 388 * we are, and it was the only connection outside. 389 */ 390 ignored_task = NULL; 391 392 if (task_pgrp(parent) != pgrp && 393 task_session(parent) == task_session(tsk) && 394 will_become_orphaned_pgrp(pgrp, ignored_task) && 395 has_stopped_jobs(pgrp)) { 396 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); 397 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); 398 } 399 } 400 401 static void coredump_task_exit(struct task_struct *tsk) 402 { 403 struct core_state *core_state; 404 405 /* 406 * Serialize with any possible pending coredump. 407 * We must hold siglock around checking core_state 408 * and setting PF_POSTCOREDUMP. The core-inducing thread 409 * will increment ->nr_threads for each thread in the 410 * group without PF_POSTCOREDUMP set. 411 */ 412 spin_lock_irq(&tsk->sighand->siglock); 413 tsk->flags |= PF_POSTCOREDUMP; 414 core_state = tsk->signal->core_state; 415 spin_unlock_irq(&tsk->sighand->siglock); 416 if (core_state) { 417 struct core_thread self; 418 419 self.task = current; 420 if (self.task->flags & PF_SIGNALED) 421 self.next = xchg(&core_state->dumper.next, &self); 422 else 423 self.task = NULL; 424 /* 425 * Implies mb(), the result of xchg() must be visible 426 * to core_state->dumper. 427 */ 428 if (atomic_dec_and_test(&core_state->nr_threads)) 429 complete(&core_state->startup); 430 431 for (;;) { 432 set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE); 433 if (!self.task) /* see coredump_finish() */ 434 break; 435 schedule(); 436 } 437 __set_current_state(TASK_RUNNING); 438 } 439 } 440 441 #ifdef CONFIG_MEMCG 442 /* 443 * A task is exiting. If it owned this mm, find a new owner for the mm. 444 */ 445 void mm_update_next_owner(struct mm_struct *mm) 446 { 447 struct task_struct *c, *g, *p = current; 448 449 retry: 450 /* 451 * If the exiting or execing task is not the owner, it's 452 * someone else's problem. 453 */ 454 if (mm->owner != p) 455 return; 456 /* 457 * The current owner is exiting/execing and there are no other 458 * candidates. Do not leave the mm pointing to a possibly 459 * freed task structure. 460 */ 461 if (atomic_read(&mm->mm_users) <= 1) { 462 WRITE_ONCE(mm->owner, NULL); 463 return; 464 } 465 466 read_lock(&tasklist_lock); 467 /* 468 * Search in the children 469 */ 470 list_for_each_entry(c, &p->children, sibling) { 471 if (c->mm == mm) 472 goto assign_new_owner; 473 } 474 475 /* 476 * Search in the siblings 477 */ 478 list_for_each_entry(c, &p->real_parent->children, sibling) { 479 if (c->mm == mm) 480 goto assign_new_owner; 481 } 482 483 /* 484 * Search through everything else, we should not get here often. 485 */ 486 for_each_process(g) { 487 if (g->flags & PF_KTHREAD) 488 continue; 489 for_each_thread(g, c) { 490 if (c->mm == mm) 491 goto assign_new_owner; 492 if (c->mm) 493 break; 494 } 495 } 496 read_unlock(&tasklist_lock); 497 /* 498 * We found no owner yet mm_users > 1: this implies that we are 499 * most likely racing with swapoff (try_to_unuse()) or /proc or 500 * ptrace or page migration (get_task_mm()). Mark owner as NULL. 501 */ 502 WRITE_ONCE(mm->owner, NULL); 503 return; 504 505 assign_new_owner: 506 BUG_ON(c == p); 507 get_task_struct(c); 508 /* 509 * The task_lock protects c->mm from changing. 510 * We always want mm->owner->mm == mm 511 */ 512 task_lock(c); 513 /* 514 * Delay read_unlock() till we have the task_lock() 515 * to ensure that c does not slip away underneath us 516 */ 517 read_unlock(&tasklist_lock); 518 if (c->mm != mm) { 519 task_unlock(c); 520 put_task_struct(c); 521 goto retry; 522 } 523 WRITE_ONCE(mm->owner, c); 524 lru_gen_migrate_mm(mm); 525 task_unlock(c); 526 put_task_struct(c); 527 } 528 #endif /* CONFIG_MEMCG */ 529 530 /* 531 * Turn us into a lazy TLB process if we 532 * aren't already.. 533 */ 534 static void exit_mm(void) 535 { 536 struct mm_struct *mm = current->mm; 537 538 exit_mm_release(current, mm); 539 if (!mm) 540 return; 541 mmap_read_lock(mm); 542 mmgrab_lazy_tlb(mm); 543 BUG_ON(mm != current->active_mm); 544 /* more a memory barrier than a real lock */ 545 task_lock(current); 546 /* 547 * When a thread stops operating on an address space, the loop 548 * in membarrier_private_expedited() may not observe that 549 * tsk->mm, and the loop in membarrier_global_expedited() may 550 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED 551 * rq->membarrier_state, so those would not issue an IPI. 552 * Membarrier requires a memory barrier after accessing 553 * user-space memory, before clearing tsk->mm or the 554 * rq->membarrier_state. 555 */ 556 smp_mb__after_spinlock(); 557 local_irq_disable(); 558 current->mm = NULL; 559 membarrier_update_current_mm(NULL); 560 enter_lazy_tlb(mm, current); 561 local_irq_enable(); 562 task_unlock(current); 563 mmap_read_unlock(mm); 564 mm_update_next_owner(mm); 565 mmput(mm); 566 if (test_thread_flag(TIF_MEMDIE)) 567 exit_oom_victim(); 568 } 569 570 static struct task_struct *find_alive_thread(struct task_struct *p) 571 { 572 struct task_struct *t; 573 574 for_each_thread(p, t) { 575 if (!(t->flags & PF_EXITING)) 576 return t; 577 } 578 return NULL; 579 } 580 581 static struct task_struct *find_child_reaper(struct task_struct *father, 582 struct list_head *dead) 583 __releases(&tasklist_lock) 584 __acquires(&tasklist_lock) 585 { 586 struct pid_namespace *pid_ns = task_active_pid_ns(father); 587 struct task_struct *reaper = pid_ns->child_reaper; 588 struct task_struct *p, *n; 589 590 if (likely(reaper != father)) 591 return reaper; 592 593 reaper = find_alive_thread(father); 594 if (reaper) { 595 pid_ns->child_reaper = reaper; 596 return reaper; 597 } 598 599 write_unlock_irq(&tasklist_lock); 600 601 list_for_each_entry_safe(p, n, dead, ptrace_entry) { 602 list_del_init(&p->ptrace_entry); 603 release_task(p); 604 } 605 606 zap_pid_ns_processes(pid_ns); 607 write_lock_irq(&tasklist_lock); 608 609 return father; 610 } 611 612 /* 613 * When we die, we re-parent all our children, and try to: 614 * 1. give them to another thread in our thread group, if such a member exists 615 * 2. give it to the first ancestor process which prctl'd itself as a 616 * child_subreaper for its children (like a service manager) 617 * 3. give it to the init process (PID 1) in our pid namespace 618 */ 619 static struct task_struct *find_new_reaper(struct task_struct *father, 620 struct task_struct *child_reaper) 621 { 622 struct task_struct *thread, *reaper; 623 624 thread = find_alive_thread(father); 625 if (thread) 626 return thread; 627 628 if (father->signal->has_child_subreaper) { 629 unsigned int ns_level = task_pid(father)->level; 630 /* 631 * Find the first ->is_child_subreaper ancestor in our pid_ns. 632 * We can't check reaper != child_reaper to ensure we do not 633 * cross the namespaces, the exiting parent could be injected 634 * by setns() + fork(). 635 * We check pid->level, this is slightly more efficient than 636 * task_active_pid_ns(reaper) != task_active_pid_ns(father). 637 */ 638 for (reaper = father->real_parent; 639 task_pid(reaper)->level == ns_level; 640 reaper = reaper->real_parent) { 641 if (reaper == &init_task) 642 break; 643 if (!reaper->signal->is_child_subreaper) 644 continue; 645 thread = find_alive_thread(reaper); 646 if (thread) 647 return thread; 648 } 649 } 650 651 return child_reaper; 652 } 653 654 /* 655 * Any that need to be release_task'd are put on the @dead list. 656 */ 657 static void reparent_leader(struct task_struct *father, struct task_struct *p, 658 struct list_head *dead) 659 { 660 if (unlikely(p->exit_state == EXIT_DEAD)) 661 return; 662 663 /* We don't want people slaying init. */ 664 p->exit_signal = SIGCHLD; 665 666 /* If it has exited notify the new parent about this child's death. */ 667 if (!p->ptrace && 668 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { 669 if (do_notify_parent(p, p->exit_signal)) { 670 p->exit_state = EXIT_DEAD; 671 list_add(&p->ptrace_entry, dead); 672 } 673 } 674 675 kill_orphaned_pgrp(p, father); 676 } 677 678 /* 679 * This does two things: 680 * 681 * A. Make init inherit all the child processes 682 * B. Check to see if any process groups have become orphaned 683 * as a result of our exiting, and if they have any stopped 684 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 685 */ 686 static void forget_original_parent(struct task_struct *father, 687 struct list_head *dead) 688 { 689 struct task_struct *p, *t, *reaper; 690 691 if (unlikely(!list_empty(&father->ptraced))) 692 exit_ptrace(father, dead); 693 694 /* Can drop and reacquire tasklist_lock */ 695 reaper = find_child_reaper(father, dead); 696 if (list_empty(&father->children)) 697 return; 698 699 reaper = find_new_reaper(father, reaper); 700 list_for_each_entry(p, &father->children, sibling) { 701 for_each_thread(p, t) { 702 RCU_INIT_POINTER(t->real_parent, reaper); 703 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father)); 704 if (likely(!t->ptrace)) 705 t->parent = t->real_parent; 706 if (t->pdeath_signal) 707 group_send_sig_info(t->pdeath_signal, 708 SEND_SIG_NOINFO, t, 709 PIDTYPE_TGID); 710 } 711 /* 712 * If this is a threaded reparent there is no need to 713 * notify anyone anything has happened. 714 */ 715 if (!same_thread_group(reaper, father)) 716 reparent_leader(father, p, dead); 717 } 718 list_splice_tail_init(&father->children, &reaper->children); 719 } 720 721 /* 722 * Send signals to all our closest relatives so that they know 723 * to properly mourn us.. 724 */ 725 static void exit_notify(struct task_struct *tsk, int group_dead) 726 { 727 bool autoreap; 728 struct task_struct *p, *n; 729 LIST_HEAD(dead); 730 731 write_lock_irq(&tasklist_lock); 732 forget_original_parent(tsk, &dead); 733 734 if (group_dead) 735 kill_orphaned_pgrp(tsk->group_leader, NULL); 736 737 tsk->exit_state = EXIT_ZOMBIE; 738 /* 739 * sub-thread or delay_group_leader(), wake up the 740 * PIDFD_THREAD waiters. 741 */ 742 if (!thread_group_empty(tsk)) 743 do_notify_pidfd(tsk); 744 745 if (unlikely(tsk->ptrace)) { 746 int sig = thread_group_leader(tsk) && 747 thread_group_empty(tsk) && 748 !ptrace_reparented(tsk) ? 749 tsk->exit_signal : SIGCHLD; 750 autoreap = do_notify_parent(tsk, sig); 751 } else if (thread_group_leader(tsk)) { 752 autoreap = thread_group_empty(tsk) && 753 do_notify_parent(tsk, tsk->exit_signal); 754 } else { 755 autoreap = true; 756 } 757 758 if (autoreap) { 759 tsk->exit_state = EXIT_DEAD; 760 list_add(&tsk->ptrace_entry, &dead); 761 } 762 763 /* mt-exec, de_thread() is waiting for group leader */ 764 if (unlikely(tsk->signal->notify_count < 0)) 765 wake_up_process(tsk->signal->group_exec_task); 766 write_unlock_irq(&tasklist_lock); 767 768 list_for_each_entry_safe(p, n, &dead, ptrace_entry) { 769 list_del_init(&p->ptrace_entry); 770 release_task(p); 771 } 772 } 773 774 #ifdef CONFIG_DEBUG_STACK_USAGE 775 static void check_stack_usage(void) 776 { 777 static DEFINE_SPINLOCK(low_water_lock); 778 static int lowest_to_date = THREAD_SIZE; 779 unsigned long free; 780 781 free = stack_not_used(current); 782 783 if (free >= lowest_to_date) 784 return; 785 786 spin_lock(&low_water_lock); 787 if (free < lowest_to_date) { 788 pr_info("%s (%d) used greatest stack depth: %lu bytes left\n", 789 current->comm, task_pid_nr(current), free); 790 lowest_to_date = free; 791 } 792 spin_unlock(&low_water_lock); 793 } 794 #else 795 static inline void check_stack_usage(void) {} 796 #endif 797 798 static void synchronize_group_exit(struct task_struct *tsk, long code) 799 { 800 struct sighand_struct *sighand = tsk->sighand; 801 struct signal_struct *signal = tsk->signal; 802 803 spin_lock_irq(&sighand->siglock); 804 signal->quick_threads--; 805 if ((signal->quick_threads == 0) && 806 !(signal->flags & SIGNAL_GROUP_EXIT)) { 807 signal->flags = SIGNAL_GROUP_EXIT; 808 signal->group_exit_code = code; 809 signal->group_stop_count = 0; 810 } 811 spin_unlock_irq(&sighand->siglock); 812 } 813 814 void __noreturn do_exit(long code) 815 { 816 struct task_struct *tsk = current; 817 int group_dead; 818 819 WARN_ON(irqs_disabled()); 820 821 synchronize_group_exit(tsk, code); 822 823 WARN_ON(tsk->plug); 824 825 kcov_task_exit(tsk); 826 kmsan_task_exit(tsk); 827 828 coredump_task_exit(tsk); 829 ptrace_event(PTRACE_EVENT_EXIT, code); 830 user_events_exit(tsk); 831 832 io_uring_files_cancel(); 833 exit_signals(tsk); /* sets PF_EXITING */ 834 835 acct_update_integrals(tsk); 836 group_dead = atomic_dec_and_test(&tsk->signal->live); 837 if (group_dead) { 838 /* 839 * If the last thread of global init has exited, panic 840 * immediately to get a useable coredump. 841 */ 842 if (unlikely(is_global_init(tsk))) 843 panic("Attempted to kill init! exitcode=0x%08x\n", 844 tsk->signal->group_exit_code ?: (int)code); 845 846 #ifdef CONFIG_POSIX_TIMERS 847 hrtimer_cancel(&tsk->signal->real_timer); 848 exit_itimers(tsk); 849 #endif 850 if (tsk->mm) 851 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); 852 } 853 acct_collect(code, group_dead); 854 if (group_dead) 855 tty_audit_exit(); 856 audit_free(tsk); 857 858 tsk->exit_code = code; 859 taskstats_exit(tsk, group_dead); 860 861 exit_mm(); 862 863 if (group_dead) 864 acct_process(); 865 trace_sched_process_exit(tsk); 866 867 exit_sem(tsk); 868 exit_shm(tsk); 869 exit_files(tsk); 870 exit_fs(tsk); 871 if (group_dead) 872 disassociate_ctty(1); 873 exit_task_namespaces(tsk); 874 exit_task_work(tsk); 875 exit_thread(tsk); 876 877 /* 878 * Flush inherited counters to the parent - before the parent 879 * gets woken up by child-exit notifications. 880 * 881 * because of cgroup mode, must be called before cgroup_exit() 882 */ 883 perf_event_exit_task(tsk); 884 885 sched_autogroup_exit_task(tsk); 886 cgroup_exit(tsk); 887 888 /* 889 * FIXME: do that only when needed, using sched_exit tracepoint 890 */ 891 flush_ptrace_hw_breakpoint(tsk); 892 893 exit_tasks_rcu_start(); 894 exit_notify(tsk, group_dead); 895 proc_exit_connector(tsk); 896 mpol_put_task_policy(tsk); 897 #ifdef CONFIG_FUTEX 898 if (unlikely(current->pi_state_cache)) 899 kfree(current->pi_state_cache); 900 #endif 901 /* 902 * Make sure we are holding no locks: 903 */ 904 debug_check_no_locks_held(); 905 906 if (tsk->io_context) 907 exit_io_context(tsk); 908 909 if (tsk->splice_pipe) 910 free_pipe_info(tsk->splice_pipe); 911 912 if (tsk->task_frag.page) 913 put_page(tsk->task_frag.page); 914 915 exit_task_stack_account(tsk); 916 917 check_stack_usage(); 918 preempt_disable(); 919 if (tsk->nr_dirtied) 920 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); 921 exit_rcu(); 922 exit_tasks_rcu_finish(); 923 924 lockdep_free_task(tsk); 925 do_task_dead(); 926 } 927 928 void __noreturn make_task_dead(int signr) 929 { 930 /* 931 * Take the task off the cpu after something catastrophic has 932 * happened. 933 * 934 * We can get here from a kernel oops, sometimes with preemption off. 935 * Start by checking for critical errors. 936 * Then fix up important state like USER_DS and preemption. 937 * Then do everything else. 938 */ 939 struct task_struct *tsk = current; 940 unsigned int limit; 941 942 if (unlikely(in_interrupt())) 943 panic("Aiee, killing interrupt handler!"); 944 if (unlikely(!tsk->pid)) 945 panic("Attempted to kill the idle task!"); 946 947 if (unlikely(irqs_disabled())) { 948 pr_info("note: %s[%d] exited with irqs disabled\n", 949 current->comm, task_pid_nr(current)); 950 local_irq_enable(); 951 } 952 if (unlikely(in_atomic())) { 953 pr_info("note: %s[%d] exited with preempt_count %d\n", 954 current->comm, task_pid_nr(current), 955 preempt_count()); 956 preempt_count_set(PREEMPT_ENABLED); 957 } 958 959 /* 960 * Every time the system oopses, if the oops happens while a reference 961 * to an object was held, the reference leaks. 962 * If the oops doesn't also leak memory, repeated oopsing can cause 963 * reference counters to wrap around (if they're not using refcount_t). 964 * This means that repeated oopsing can make unexploitable-looking bugs 965 * exploitable through repeated oopsing. 966 * To make sure this can't happen, place an upper bound on how often the 967 * kernel may oops without panic(). 968 */ 969 limit = READ_ONCE(oops_limit); 970 if (atomic_inc_return(&oops_count) >= limit && limit) 971 panic("Oopsed too often (kernel.oops_limit is %d)", limit); 972 973 /* 974 * We're taking recursive faults here in make_task_dead. Safest is to just 975 * leave this task alone and wait for reboot. 976 */ 977 if (unlikely(tsk->flags & PF_EXITING)) { 978 pr_alert("Fixing recursive fault but reboot is needed!\n"); 979 futex_exit_recursive(tsk); 980 tsk->exit_state = EXIT_DEAD; 981 refcount_inc(&tsk->rcu_users); 982 do_task_dead(); 983 } 984 985 do_exit(signr); 986 } 987 988 SYSCALL_DEFINE1(exit, int, error_code) 989 { 990 do_exit((error_code&0xff)<<8); 991 } 992 993 /* 994 * Take down every thread in the group. This is called by fatal signals 995 * as well as by sys_exit_group (below). 996 */ 997 void __noreturn 998 do_group_exit(int exit_code) 999 { 1000 struct signal_struct *sig = current->signal; 1001 1002 if (sig->flags & SIGNAL_GROUP_EXIT) 1003 exit_code = sig->group_exit_code; 1004 else if (sig->group_exec_task) 1005 exit_code = 0; 1006 else { 1007 struct sighand_struct *const sighand = current->sighand; 1008 1009 spin_lock_irq(&sighand->siglock); 1010 if (sig->flags & SIGNAL_GROUP_EXIT) 1011 /* Another thread got here before we took the lock. */ 1012 exit_code = sig->group_exit_code; 1013 else if (sig->group_exec_task) 1014 exit_code = 0; 1015 else { 1016 sig->group_exit_code = exit_code; 1017 sig->flags = SIGNAL_GROUP_EXIT; 1018 zap_other_threads(current); 1019 } 1020 spin_unlock_irq(&sighand->siglock); 1021 } 1022 1023 do_exit(exit_code); 1024 /* NOTREACHED */ 1025 } 1026 1027 /* 1028 * this kills every thread in the thread group. Note that any externally 1029 * wait4()-ing process will get the correct exit code - even if this 1030 * thread is not the thread group leader. 1031 */ 1032 SYSCALL_DEFINE1(exit_group, int, error_code) 1033 { 1034 do_group_exit((error_code & 0xff) << 8); 1035 /* NOTREACHED */ 1036 return 0; 1037 } 1038 1039 static int eligible_pid(struct wait_opts *wo, struct task_struct *p) 1040 { 1041 return wo->wo_type == PIDTYPE_MAX || 1042 task_pid_type(p, wo->wo_type) == wo->wo_pid; 1043 } 1044 1045 static int 1046 eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p) 1047 { 1048 if (!eligible_pid(wo, p)) 1049 return 0; 1050 1051 /* 1052 * Wait for all children (clone and not) if __WALL is set or 1053 * if it is traced by us. 1054 */ 1055 if (ptrace || (wo->wo_flags & __WALL)) 1056 return 1; 1057 1058 /* 1059 * Otherwise, wait for clone children *only* if __WCLONE is set; 1060 * otherwise, wait for non-clone children *only*. 1061 * 1062 * Note: a "clone" child here is one that reports to its parent 1063 * using a signal other than SIGCHLD, or a non-leader thread which 1064 * we can only see if it is traced by us. 1065 */ 1066 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) 1067 return 0; 1068 1069 return 1; 1070 } 1071 1072 /* 1073 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold 1074 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1075 * the lock and this task is uninteresting. If we return nonzero, we have 1076 * released the lock and the system call should return. 1077 */ 1078 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) 1079 { 1080 int state, status; 1081 pid_t pid = task_pid_vnr(p); 1082 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1083 struct waitid_info *infop; 1084 1085 if (!likely(wo->wo_flags & WEXITED)) 1086 return 0; 1087 1088 if (unlikely(wo->wo_flags & WNOWAIT)) { 1089 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1090 ? p->signal->group_exit_code : p->exit_code; 1091 get_task_struct(p); 1092 read_unlock(&tasklist_lock); 1093 sched_annotate_sleep(); 1094 if (wo->wo_rusage) 1095 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1096 put_task_struct(p); 1097 goto out_info; 1098 } 1099 /* 1100 * Move the task's state to DEAD/TRACE, only one thread can do this. 1101 */ 1102 state = (ptrace_reparented(p) && thread_group_leader(p)) ? 1103 EXIT_TRACE : EXIT_DEAD; 1104 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) 1105 return 0; 1106 /* 1107 * We own this thread, nobody else can reap it. 1108 */ 1109 read_unlock(&tasklist_lock); 1110 sched_annotate_sleep(); 1111 1112 /* 1113 * Check thread_group_leader() to exclude the traced sub-threads. 1114 */ 1115 if (state == EXIT_DEAD && thread_group_leader(p)) { 1116 struct signal_struct *sig = p->signal; 1117 struct signal_struct *psig = current->signal; 1118 unsigned long maxrss; 1119 u64 tgutime, tgstime; 1120 1121 /* 1122 * The resource counters for the group leader are in its 1123 * own task_struct. Those for dead threads in the group 1124 * are in its signal_struct, as are those for the child 1125 * processes it has previously reaped. All these 1126 * accumulate in the parent's signal_struct c* fields. 1127 * 1128 * We don't bother to take a lock here to protect these 1129 * p->signal fields because the whole thread group is dead 1130 * and nobody can change them. 1131 * 1132 * psig->stats_lock also protects us from our sub-threads 1133 * which can reap other children at the same time. 1134 * 1135 * We use thread_group_cputime_adjusted() to get times for 1136 * the thread group, which consolidates times for all threads 1137 * in the group including the group leader. 1138 */ 1139 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1140 write_seqlock_irq(&psig->stats_lock); 1141 psig->cutime += tgutime + sig->cutime; 1142 psig->cstime += tgstime + sig->cstime; 1143 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; 1144 psig->cmin_flt += 1145 p->min_flt + sig->min_flt + sig->cmin_flt; 1146 psig->cmaj_flt += 1147 p->maj_flt + sig->maj_flt + sig->cmaj_flt; 1148 psig->cnvcsw += 1149 p->nvcsw + sig->nvcsw + sig->cnvcsw; 1150 psig->cnivcsw += 1151 p->nivcsw + sig->nivcsw + sig->cnivcsw; 1152 psig->cinblock += 1153 task_io_get_inblock(p) + 1154 sig->inblock + sig->cinblock; 1155 psig->coublock += 1156 task_io_get_oublock(p) + 1157 sig->oublock + sig->coublock; 1158 maxrss = max(sig->maxrss, sig->cmaxrss); 1159 if (psig->cmaxrss < maxrss) 1160 psig->cmaxrss = maxrss; 1161 task_io_accounting_add(&psig->ioac, &p->ioac); 1162 task_io_accounting_add(&psig->ioac, &sig->ioac); 1163 write_sequnlock_irq(&psig->stats_lock); 1164 } 1165 1166 if (wo->wo_rusage) 1167 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1168 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1169 ? p->signal->group_exit_code : p->exit_code; 1170 wo->wo_stat = status; 1171 1172 if (state == EXIT_TRACE) { 1173 write_lock_irq(&tasklist_lock); 1174 /* We dropped tasklist, ptracer could die and untrace */ 1175 ptrace_unlink(p); 1176 1177 /* If parent wants a zombie, don't release it now */ 1178 state = EXIT_ZOMBIE; 1179 if (do_notify_parent(p, p->exit_signal)) 1180 state = EXIT_DEAD; 1181 p->exit_state = state; 1182 write_unlock_irq(&tasklist_lock); 1183 } 1184 if (state == EXIT_DEAD) 1185 release_task(p); 1186 1187 out_info: 1188 infop = wo->wo_info; 1189 if (infop) { 1190 if ((status & 0x7f) == 0) { 1191 infop->cause = CLD_EXITED; 1192 infop->status = status >> 8; 1193 } else { 1194 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; 1195 infop->status = status & 0x7f; 1196 } 1197 infop->pid = pid; 1198 infop->uid = uid; 1199 } 1200 1201 return pid; 1202 } 1203 1204 static int *task_stopped_code(struct task_struct *p, bool ptrace) 1205 { 1206 if (ptrace) { 1207 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) 1208 return &p->exit_code; 1209 } else { 1210 if (p->signal->flags & SIGNAL_STOP_STOPPED) 1211 return &p->signal->group_exit_code; 1212 } 1213 return NULL; 1214 } 1215 1216 /** 1217 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED 1218 * @wo: wait options 1219 * @ptrace: is the wait for ptrace 1220 * @p: task to wait for 1221 * 1222 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. 1223 * 1224 * CONTEXT: 1225 * read_lock(&tasklist_lock), which is released if return value is 1226 * non-zero. Also, grabs and releases @p->sighand->siglock. 1227 * 1228 * RETURNS: 1229 * 0 if wait condition didn't exist and search for other wait conditions 1230 * should continue. Non-zero return, -errno on failure and @p's pid on 1231 * success, implies that tasklist_lock is released and wait condition 1232 * search should terminate. 1233 */ 1234 static int wait_task_stopped(struct wait_opts *wo, 1235 int ptrace, struct task_struct *p) 1236 { 1237 struct waitid_info *infop; 1238 int exit_code, *p_code, why; 1239 uid_t uid = 0; /* unneeded, required by compiler */ 1240 pid_t pid; 1241 1242 /* 1243 * Traditionally we see ptrace'd stopped tasks regardless of options. 1244 */ 1245 if (!ptrace && !(wo->wo_flags & WUNTRACED)) 1246 return 0; 1247 1248 if (!task_stopped_code(p, ptrace)) 1249 return 0; 1250 1251 exit_code = 0; 1252 spin_lock_irq(&p->sighand->siglock); 1253 1254 p_code = task_stopped_code(p, ptrace); 1255 if (unlikely(!p_code)) 1256 goto unlock_sig; 1257 1258 exit_code = *p_code; 1259 if (!exit_code) 1260 goto unlock_sig; 1261 1262 if (!unlikely(wo->wo_flags & WNOWAIT)) 1263 *p_code = 0; 1264 1265 uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1266 unlock_sig: 1267 spin_unlock_irq(&p->sighand->siglock); 1268 if (!exit_code) 1269 return 0; 1270 1271 /* 1272 * Now we are pretty sure this task is interesting. 1273 * Make sure it doesn't get reaped out from under us while we 1274 * give up the lock and then examine it below. We don't want to 1275 * keep holding onto the tasklist_lock while we call getrusage and 1276 * possibly take page faults for user memory. 1277 */ 1278 get_task_struct(p); 1279 pid = task_pid_vnr(p); 1280 why = ptrace ? CLD_TRAPPED : CLD_STOPPED; 1281 read_unlock(&tasklist_lock); 1282 sched_annotate_sleep(); 1283 if (wo->wo_rusage) 1284 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1285 put_task_struct(p); 1286 1287 if (likely(!(wo->wo_flags & WNOWAIT))) 1288 wo->wo_stat = (exit_code << 8) | 0x7f; 1289 1290 infop = wo->wo_info; 1291 if (infop) { 1292 infop->cause = why; 1293 infop->status = exit_code; 1294 infop->pid = pid; 1295 infop->uid = uid; 1296 } 1297 return pid; 1298 } 1299 1300 /* 1301 * Handle do_wait work for one task in a live, non-stopped state. 1302 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1303 * the lock and this task is uninteresting. If we return nonzero, we have 1304 * released the lock and the system call should return. 1305 */ 1306 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) 1307 { 1308 struct waitid_info *infop; 1309 pid_t pid; 1310 uid_t uid; 1311 1312 if (!unlikely(wo->wo_flags & WCONTINUED)) 1313 return 0; 1314 1315 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1316 return 0; 1317 1318 spin_lock_irq(&p->sighand->siglock); 1319 /* Re-check with the lock held. */ 1320 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { 1321 spin_unlock_irq(&p->sighand->siglock); 1322 return 0; 1323 } 1324 if (!unlikely(wo->wo_flags & WNOWAIT)) 1325 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1326 uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1327 spin_unlock_irq(&p->sighand->siglock); 1328 1329 pid = task_pid_vnr(p); 1330 get_task_struct(p); 1331 read_unlock(&tasklist_lock); 1332 sched_annotate_sleep(); 1333 if (wo->wo_rusage) 1334 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1335 put_task_struct(p); 1336 1337 infop = wo->wo_info; 1338 if (!infop) { 1339 wo->wo_stat = 0xffff; 1340 } else { 1341 infop->cause = CLD_CONTINUED; 1342 infop->pid = pid; 1343 infop->uid = uid; 1344 infop->status = SIGCONT; 1345 } 1346 return pid; 1347 } 1348 1349 /* 1350 * Consider @p for a wait by @parent. 1351 * 1352 * -ECHILD should be in ->notask_error before the first call. 1353 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1354 * Returns zero if the search for a child should continue; 1355 * then ->notask_error is 0 if @p is an eligible child, 1356 * or still -ECHILD. 1357 */ 1358 static int wait_consider_task(struct wait_opts *wo, int ptrace, 1359 struct task_struct *p) 1360 { 1361 /* 1362 * We can race with wait_task_zombie() from another thread. 1363 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition 1364 * can't confuse the checks below. 1365 */ 1366 int exit_state = READ_ONCE(p->exit_state); 1367 int ret; 1368 1369 if (unlikely(exit_state == EXIT_DEAD)) 1370 return 0; 1371 1372 ret = eligible_child(wo, ptrace, p); 1373 if (!ret) 1374 return ret; 1375 1376 if (unlikely(exit_state == EXIT_TRACE)) { 1377 /* 1378 * ptrace == 0 means we are the natural parent. In this case 1379 * we should clear notask_error, debugger will notify us. 1380 */ 1381 if (likely(!ptrace)) 1382 wo->notask_error = 0; 1383 return 0; 1384 } 1385 1386 if (likely(!ptrace) && unlikely(p->ptrace)) { 1387 /* 1388 * If it is traced by its real parent's group, just pretend 1389 * the caller is ptrace_do_wait() and reap this child if it 1390 * is zombie. 1391 * 1392 * This also hides group stop state from real parent; otherwise 1393 * a single stop can be reported twice as group and ptrace stop. 1394 * If a ptracer wants to distinguish these two events for its 1395 * own children it should create a separate process which takes 1396 * the role of real parent. 1397 */ 1398 if (!ptrace_reparented(p)) 1399 ptrace = 1; 1400 } 1401 1402 /* slay zombie? */ 1403 if (exit_state == EXIT_ZOMBIE) { 1404 /* we don't reap group leaders with subthreads */ 1405 if (!delay_group_leader(p)) { 1406 /* 1407 * A zombie ptracee is only visible to its ptracer. 1408 * Notification and reaping will be cascaded to the 1409 * real parent when the ptracer detaches. 1410 */ 1411 if (unlikely(ptrace) || likely(!p->ptrace)) 1412 return wait_task_zombie(wo, p); 1413 } 1414 1415 /* 1416 * Allow access to stopped/continued state via zombie by 1417 * falling through. Clearing of notask_error is complex. 1418 * 1419 * When !@ptrace: 1420 * 1421 * If WEXITED is set, notask_error should naturally be 1422 * cleared. If not, subset of WSTOPPED|WCONTINUED is set, 1423 * so, if there are live subthreads, there are events to 1424 * wait for. If all subthreads are dead, it's still safe 1425 * to clear - this function will be called again in finite 1426 * amount time once all the subthreads are released and 1427 * will then return without clearing. 1428 * 1429 * When @ptrace: 1430 * 1431 * Stopped state is per-task and thus can't change once the 1432 * target task dies. Only continued and exited can happen. 1433 * Clear notask_error if WCONTINUED | WEXITED. 1434 */ 1435 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) 1436 wo->notask_error = 0; 1437 } else { 1438 /* 1439 * @p is alive and it's gonna stop, continue or exit, so 1440 * there always is something to wait for. 1441 */ 1442 wo->notask_error = 0; 1443 } 1444 1445 /* 1446 * Wait for stopped. Depending on @ptrace, different stopped state 1447 * is used and the two don't interact with each other. 1448 */ 1449 ret = wait_task_stopped(wo, ptrace, p); 1450 if (ret) 1451 return ret; 1452 1453 /* 1454 * Wait for continued. There's only one continued state and the 1455 * ptracer can consume it which can confuse the real parent. Don't 1456 * use WCONTINUED from ptracer. You don't need or want it. 1457 */ 1458 return wait_task_continued(wo, p); 1459 } 1460 1461 /* 1462 * Do the work of do_wait() for one thread in the group, @tsk. 1463 * 1464 * -ECHILD should be in ->notask_error before the first call. 1465 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1466 * Returns zero if the search for a child should continue; then 1467 * ->notask_error is 0 if there were any eligible children, 1468 * or still -ECHILD. 1469 */ 1470 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) 1471 { 1472 struct task_struct *p; 1473 1474 list_for_each_entry(p, &tsk->children, sibling) { 1475 int ret = wait_consider_task(wo, 0, p); 1476 1477 if (ret) 1478 return ret; 1479 } 1480 1481 return 0; 1482 } 1483 1484 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) 1485 { 1486 struct task_struct *p; 1487 1488 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { 1489 int ret = wait_consider_task(wo, 1, p); 1490 1491 if (ret) 1492 return ret; 1493 } 1494 1495 return 0; 1496 } 1497 1498 bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p) 1499 { 1500 if (!eligible_pid(wo, p)) 1501 return false; 1502 1503 if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent) 1504 return false; 1505 1506 return true; 1507 } 1508 1509 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, 1510 int sync, void *key) 1511 { 1512 struct wait_opts *wo = container_of(wait, struct wait_opts, 1513 child_wait); 1514 struct task_struct *p = key; 1515 1516 if (pid_child_should_wake(wo, p)) 1517 return default_wake_function(wait, mode, sync, key); 1518 1519 return 0; 1520 } 1521 1522 void __wake_up_parent(struct task_struct *p, struct task_struct *parent) 1523 { 1524 __wake_up_sync_key(&parent->signal->wait_chldexit, 1525 TASK_INTERRUPTIBLE, p); 1526 } 1527 1528 static bool is_effectively_child(struct wait_opts *wo, bool ptrace, 1529 struct task_struct *target) 1530 { 1531 struct task_struct *parent = 1532 !ptrace ? target->real_parent : target->parent; 1533 1534 return current == parent || (!(wo->wo_flags & __WNOTHREAD) && 1535 same_thread_group(current, parent)); 1536 } 1537 1538 /* 1539 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child 1540 * and tracee lists to find the target task. 1541 */ 1542 static int do_wait_pid(struct wait_opts *wo) 1543 { 1544 bool ptrace; 1545 struct task_struct *target; 1546 int retval; 1547 1548 ptrace = false; 1549 target = pid_task(wo->wo_pid, PIDTYPE_TGID); 1550 if (target && is_effectively_child(wo, ptrace, target)) { 1551 retval = wait_consider_task(wo, ptrace, target); 1552 if (retval) 1553 return retval; 1554 } 1555 1556 ptrace = true; 1557 target = pid_task(wo->wo_pid, PIDTYPE_PID); 1558 if (target && target->ptrace && 1559 is_effectively_child(wo, ptrace, target)) { 1560 retval = wait_consider_task(wo, ptrace, target); 1561 if (retval) 1562 return retval; 1563 } 1564 1565 return 0; 1566 } 1567 1568 long __do_wait(struct wait_opts *wo) 1569 { 1570 long retval; 1571 1572 /* 1573 * If there is nothing that can match our criteria, just get out. 1574 * We will clear ->notask_error to zero if we see any child that 1575 * might later match our criteria, even if we are not able to reap 1576 * it yet. 1577 */ 1578 wo->notask_error = -ECHILD; 1579 if ((wo->wo_type < PIDTYPE_MAX) && 1580 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type))) 1581 goto notask; 1582 1583 read_lock(&tasklist_lock); 1584 1585 if (wo->wo_type == PIDTYPE_PID) { 1586 retval = do_wait_pid(wo); 1587 if (retval) 1588 return retval; 1589 } else { 1590 struct task_struct *tsk = current; 1591 1592 do { 1593 retval = do_wait_thread(wo, tsk); 1594 if (retval) 1595 return retval; 1596 1597 retval = ptrace_do_wait(wo, tsk); 1598 if (retval) 1599 return retval; 1600 1601 if (wo->wo_flags & __WNOTHREAD) 1602 break; 1603 } while_each_thread(current, tsk); 1604 } 1605 read_unlock(&tasklist_lock); 1606 1607 notask: 1608 retval = wo->notask_error; 1609 if (!retval && !(wo->wo_flags & WNOHANG)) 1610 return -ERESTARTSYS; 1611 1612 return retval; 1613 } 1614 1615 static long do_wait(struct wait_opts *wo) 1616 { 1617 int retval; 1618 1619 trace_sched_process_wait(wo->wo_pid); 1620 1621 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); 1622 wo->child_wait.private = current; 1623 add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); 1624 1625 do { 1626 set_current_state(TASK_INTERRUPTIBLE); 1627 retval = __do_wait(wo); 1628 if (retval != -ERESTARTSYS) 1629 break; 1630 if (signal_pending(current)) 1631 break; 1632 schedule(); 1633 } while (1); 1634 1635 __set_current_state(TASK_RUNNING); 1636 remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); 1637 return retval; 1638 } 1639 1640 int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid, 1641 struct waitid_info *infop, int options, 1642 struct rusage *ru) 1643 { 1644 unsigned int f_flags = 0; 1645 struct pid *pid = NULL; 1646 enum pid_type type; 1647 1648 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED| 1649 __WNOTHREAD|__WCLONE|__WALL)) 1650 return -EINVAL; 1651 if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) 1652 return -EINVAL; 1653 1654 switch (which) { 1655 case P_ALL: 1656 type = PIDTYPE_MAX; 1657 break; 1658 case P_PID: 1659 type = PIDTYPE_PID; 1660 if (upid <= 0) 1661 return -EINVAL; 1662 1663 pid = find_get_pid(upid); 1664 break; 1665 case P_PGID: 1666 type = PIDTYPE_PGID; 1667 if (upid < 0) 1668 return -EINVAL; 1669 1670 if (upid) 1671 pid = find_get_pid(upid); 1672 else 1673 pid = get_task_pid(current, PIDTYPE_PGID); 1674 break; 1675 case P_PIDFD: 1676 type = PIDTYPE_PID; 1677 if (upid < 0) 1678 return -EINVAL; 1679 1680 pid = pidfd_get_pid(upid, &f_flags); 1681 if (IS_ERR(pid)) 1682 return PTR_ERR(pid); 1683 1684 break; 1685 default: 1686 return -EINVAL; 1687 } 1688 1689 wo->wo_type = type; 1690 wo->wo_pid = pid; 1691 wo->wo_flags = options; 1692 wo->wo_info = infop; 1693 wo->wo_rusage = ru; 1694 if (f_flags & O_NONBLOCK) 1695 wo->wo_flags |= WNOHANG; 1696 1697 return 0; 1698 } 1699 1700 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop, 1701 int options, struct rusage *ru) 1702 { 1703 struct wait_opts wo; 1704 long ret; 1705 1706 ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru); 1707 if (ret) 1708 return ret; 1709 1710 ret = do_wait(&wo); 1711 if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG)) 1712 ret = -EAGAIN; 1713 1714 put_pid(wo.wo_pid); 1715 return ret; 1716 } 1717 1718 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, 1719 infop, int, options, struct rusage __user *, ru) 1720 { 1721 struct rusage r; 1722 struct waitid_info info = {.status = 0}; 1723 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); 1724 int signo = 0; 1725 1726 if (err > 0) { 1727 signo = SIGCHLD; 1728 err = 0; 1729 if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) 1730 return -EFAULT; 1731 } 1732 if (!infop) 1733 return err; 1734 1735 if (!user_write_access_begin(infop, sizeof(*infop))) 1736 return -EFAULT; 1737 1738 unsafe_put_user(signo, &infop->si_signo, Efault); 1739 unsafe_put_user(0, &infop->si_errno, Efault); 1740 unsafe_put_user(info.cause, &infop->si_code, Efault); 1741 unsafe_put_user(info.pid, &infop->si_pid, Efault); 1742 unsafe_put_user(info.uid, &infop->si_uid, Efault); 1743 unsafe_put_user(info.status, &infop->si_status, Efault); 1744 user_write_access_end(); 1745 return err; 1746 Efault: 1747 user_write_access_end(); 1748 return -EFAULT; 1749 } 1750 1751 long kernel_wait4(pid_t upid, int __user *stat_addr, int options, 1752 struct rusage *ru) 1753 { 1754 struct wait_opts wo; 1755 struct pid *pid = NULL; 1756 enum pid_type type; 1757 long ret; 1758 1759 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1760 __WNOTHREAD|__WCLONE|__WALL)) 1761 return -EINVAL; 1762 1763 /* -INT_MIN is not defined */ 1764 if (upid == INT_MIN) 1765 return -ESRCH; 1766 1767 if (upid == -1) 1768 type = PIDTYPE_MAX; 1769 else if (upid < 0) { 1770 type = PIDTYPE_PGID; 1771 pid = find_get_pid(-upid); 1772 } else if (upid == 0) { 1773 type = PIDTYPE_PGID; 1774 pid = get_task_pid(current, PIDTYPE_PGID); 1775 } else /* upid > 0 */ { 1776 type = PIDTYPE_PID; 1777 pid = find_get_pid(upid); 1778 } 1779 1780 wo.wo_type = type; 1781 wo.wo_pid = pid; 1782 wo.wo_flags = options | WEXITED; 1783 wo.wo_info = NULL; 1784 wo.wo_stat = 0; 1785 wo.wo_rusage = ru; 1786 ret = do_wait(&wo); 1787 put_pid(pid); 1788 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr)) 1789 ret = -EFAULT; 1790 1791 return ret; 1792 } 1793 1794 int kernel_wait(pid_t pid, int *stat) 1795 { 1796 struct wait_opts wo = { 1797 .wo_type = PIDTYPE_PID, 1798 .wo_pid = find_get_pid(pid), 1799 .wo_flags = WEXITED, 1800 }; 1801 int ret; 1802 1803 ret = do_wait(&wo); 1804 if (ret > 0 && wo.wo_stat) 1805 *stat = wo.wo_stat; 1806 put_pid(wo.wo_pid); 1807 return ret; 1808 } 1809 1810 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, 1811 int, options, struct rusage __user *, ru) 1812 { 1813 struct rusage r; 1814 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL); 1815 1816 if (err > 0) { 1817 if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) 1818 return -EFAULT; 1819 } 1820 return err; 1821 } 1822 1823 #ifdef __ARCH_WANT_SYS_WAITPID 1824 1825 /* 1826 * sys_waitpid() remains for compatibility. waitpid() should be 1827 * implemented by calling sys_wait4() from libc.a. 1828 */ 1829 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) 1830 { 1831 return kernel_wait4(pid, stat_addr, options, NULL); 1832 } 1833 1834 #endif 1835 1836 #ifdef CONFIG_COMPAT 1837 COMPAT_SYSCALL_DEFINE4(wait4, 1838 compat_pid_t, pid, 1839 compat_uint_t __user *, stat_addr, 1840 int, options, 1841 struct compat_rusage __user *, ru) 1842 { 1843 struct rusage r; 1844 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL); 1845 if (err > 0) { 1846 if (ru && put_compat_rusage(&r, ru)) 1847 return -EFAULT; 1848 } 1849 return err; 1850 } 1851 1852 COMPAT_SYSCALL_DEFINE5(waitid, 1853 int, which, compat_pid_t, pid, 1854 struct compat_siginfo __user *, infop, int, options, 1855 struct compat_rusage __user *, uru) 1856 { 1857 struct rusage ru; 1858 struct waitid_info info = {.status = 0}; 1859 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL); 1860 int signo = 0; 1861 if (err > 0) { 1862 signo = SIGCHLD; 1863 err = 0; 1864 if (uru) { 1865 /* kernel_waitid() overwrites everything in ru */ 1866 if (COMPAT_USE_64BIT_TIME) 1867 err = copy_to_user(uru, &ru, sizeof(ru)); 1868 else 1869 err = put_compat_rusage(&ru, uru); 1870 if (err) 1871 return -EFAULT; 1872 } 1873 } 1874 1875 if (!infop) 1876 return err; 1877 1878 if (!user_write_access_begin(infop, sizeof(*infop))) 1879 return -EFAULT; 1880 1881 unsafe_put_user(signo, &infop->si_signo, Efault); 1882 unsafe_put_user(0, &infop->si_errno, Efault); 1883 unsafe_put_user(info.cause, &infop->si_code, Efault); 1884 unsafe_put_user(info.pid, &infop->si_pid, Efault); 1885 unsafe_put_user(info.uid, &infop->si_uid, Efault); 1886 unsafe_put_user(info.status, &infop->si_status, Efault); 1887 user_write_access_end(); 1888 return err; 1889 Efault: 1890 user_write_access_end(); 1891 return -EFAULT; 1892 } 1893 #endif 1894 1895 /* 1896 * This needs to be __function_aligned as GCC implicitly makes any 1897 * implementation of abort() cold and drops alignment specified by 1898 * -falign-functions=N. 1899 * 1900 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11 1901 */ 1902 __weak __function_aligned void abort(void) 1903 { 1904 BUG(); 1905 1906 /* if that doesn't kill us, halt */ 1907 panic("Oops failed to kill thread"); 1908 } 1909 EXPORT_SYMBOL(abort); 1910