// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/exit.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>

#include <uapi/linux/wait.h>

#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname	= "oops_limit",
		.data		= &oops_limit,
		.maxlen		= sizeof(oops_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif
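
/*
 * Illustrative userspace view of the two knobs above (not kernel code;
 * paths assume the usual procfs/sysfs mounts):
 *
 *	$ cat /proc/sys/kernel/oops_limit
 *	10000
 *	$ cat /sys/kernel/oops_count
 *	0
 *
 * Writing 0 to kernel.oops_limit disables the panic enforced in
 * make_task_dead() below.
 */
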
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
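
/*
 * A minimal sketch of the waiter side this wake-up pairs with; the
 * rcuwait_init()/rcuwait_wait_event() calls are the real API from
 * <linux/rcuwait.h>, the surrounding code is illustrative only:
 *
 *	struct rcuwait w;
 *	bool done;
 *
 *	rcuwait_init(&w);
 *
 *	// waiter: sleeps until the condition becomes true
 *	rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
 *
 *	// waker: set the condition first, then wake
 *	WRITE_ONCE(done, true);
 *	rcuwait_wake_up(&w);
 */
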
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52. Orphaned process groups are not to be affected
 * by terminal-generated stop signals. Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}
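
/*
 * Worked example for the above (illustrative): a shell stops a pipeline
 * running in its own process group with SIGTSTP and then exits. The shell
 * was that group's only link to another process group in the same session,
 * so the stopped group becomes orphaned; without the SIGHUP/SIGCONT pair
 * sent here it would remain stopped forever, with nobody entitled to
 * resume it.
 */
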
static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP. The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);
	if (core_state) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_IDLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/* drops tasklist_lock if succeeds */
static bool __try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = false;

	task_lock(tsk);
	if (likely(tsk->mm == mm)) {
		/* tsk can't pass exit_mm/exec_mmap and exit */
		read_unlock(&tasklist_lock);
		WRITE_ONCE(mm->owner, tsk);
		lru_gen_migrate_mm(mm);
		ret = true;
	}
	task_unlock(tsk);
	return ret;
}

static bool try_to_set_owner(struct task_struct *g, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(g, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm == mm) {
			if (__try_to_set_owner(t, mm))
				return true;
		} else if (t_mm)
			break;
	}

	return false;
}

/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *g, *p = current;

	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates. Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(g, &p->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search in the siblings
	 */
	list_for_each_entry(g, &p->real_parent->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (atomic_read(&mm->mm_users) <= 1)
			break;
		if (g->flags & PF_KTHREAD)
			continue;
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
ret:
	return;

}
#endif /* CONFIG_MEMCG */
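
/*
 * Note: mm->owner only exists under CONFIG_MEMCG; memcg resolves which
 * cgroup an mm is charged to through it (see get_mem_cgroup_from_mm()),
 * which is why a dying owner hands the mm off above instead of leaving
 * a dangling task pointer behind.
 */
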
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}
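
/*
 * Illustrative userspace side of case 2 above: a service manager marks
 * itself once at startup with
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *
 * after which orphaned descendants within its subtree reparent to it
 * rather than to PID 1.
 */
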
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A. Make init inherit all the child processes
 * B. Check to see if any process groups have become orphaned
 *    as a result of our exiting, and if they have any stopped
 *    jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	/*
	 * sub-thread or delay_group_leader(), wake up the
	 * PIDFD_THREAD waiters.
	 */
	if (!thread_group_empty(tsk))
		do_notify_pidfd(tsk);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}
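
/*
 * Note on autoreap above: do_notify_parent() returns true when the parent
 * ignores SIGCHLD (SIG_IGN or SA_NOCLDWAIT), so such children skip the
 * zombie state entirely and are released right here rather than waiting
 * to be reaped by wait().
 */
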
#ifdef CONFIG_DEBUG_STACK_USAGE
unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}

/* Count the maximum pages reached in kernel stacks */
static inline void kstack_histogram(unsigned long used_stack)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
	if (used_stack <= 1024)
		count_vm_event(KSTACK_1K);
#if THREAD_SIZE > 1024
	else if (used_stack <= 2048)
		count_vm_event(KSTACK_2K);
#endif
#if THREAD_SIZE > 2048
	else if (used_stack <= 4096)
		count_vm_event(KSTACK_4K);
#endif
#if THREAD_SIZE > 4096
	else if (used_stack <= 8192)
		count_vm_event(KSTACK_8K);
#endif
#if THREAD_SIZE > 8192
	else if (used_stack <= 16384)
		count_vm_event(KSTACK_16K);
#endif
#if THREAD_SIZE > 16384
	else if (used_stack <= 32768)
		count_vm_event(KSTACK_32K);
#endif
#if THREAD_SIZE > 32768
	else if (used_stack <= 65536)
		count_vm_event(KSTACK_64K);
#endif
#if THREAD_SIZE > 65536
	else
		count_vm_event(KSTACK_REST);
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
}

static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);
	kstack_histogram(THREAD_SIZE - free);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
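
/*
 * With CONFIG_DEBUG_STACK_USAGE enabled, new stack-depth records from
 * check_stack_usage() show up in the kernel log as, e.g. (values
 * illustrative):
 *
 *	bash (1234) used greatest stack depth: 5616 bytes left
 */
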
static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */

	seccomp_filter_release(tsk);

	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	exit_task_stack_account(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}
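
/*
 * Encoding example (illustrative): the low byte of error_code is shifted
 * into the wait-status position, so exit(1) in userspace produces a status
 * of 0x0100 for the parent, i.e. WIFEXITED() is true and WEXITSTATUS()
 * yields 1.
 */
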
/*
 * Take down every thread in the group. This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}
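
/*
 * Example for the __WCLONE rule above (illustrative): a child created via
 * clone(2) with an exit_signal other than SIGCHLD (e.g. 0) is invisible to
 * a plain wait4(-1, ...); the caller must pass __WCLONE or __WALL to
 * reap it.
 */
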
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
			? p->signal->group_exit_code : p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct. Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped. All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		write_seqlock_irq(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock_irq(&psig->stats_lock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero. Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue. Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below. We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}
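
/*
 * How the wo_stat encodings built by the three handlers above map to the
 * userspace <sys/wait.h> macros (glibc definitions assumed):
 *
 *	exited:    status = code << 8         -> WIFEXITED(), WEXITSTATUS()
 *	killed:    status = signr [| 0x80]    -> WIFSIGNALED(), WTERMSIG()
 *	stopped:   status = (sig << 8) | 0x7f -> WIFSTOPPED(), WSTOPSIG()
 *	continued: status = 0xffff            -> WIFCONTINUED()
 */
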
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through. Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared. If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for. If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies. Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped. Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued. There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent. Don't
	 * use WCONTINUED from ptracer. You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return false;

	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
		return false;

	return true;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (pid_child_should_wake(wo, p))
		return default_wake_function(wait, mode, sync, key);

	return 0;
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}
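
/*
 * Wake-up path note: an exiting child reaches __wake_up_parent() via
 * do_notify_parent(), and child_wait_callback() filters that wakeup so
 * only waiters whose criteria can possibly match (pid_child_should_wake())
 * are actually woken from do_wait() below.
 */
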
static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
				 struct task_struct *target)
{
	struct task_struct *parent =
		!ptrace ? target->real_parent : target->parent;

	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
				     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
 * and tracee lists to find the target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
	bool ptrace;
	struct task_struct *target;
	int retval;

	ptrace = false;
	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
	if (target && is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	ptrace = true;
	target = pid_task(wo->wo_pid, PIDTYPE_PID);
	if (target && target->ptrace &&
	    is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	return 0;
}

long __do_wait(struct wait_opts *wo)
{
	long retval;

	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	read_lock(&tasklist_lock);

	if (wo->wo_type == PIDTYPE_PID) {
		retval = do_wait_pid(wo);
		if (retval)
			return retval;
	} else {
		struct task_struct *tsk = current;

		do {
			retval = do_wait_thread(wo, tsk);
			if (retval)
				return retval;

			retval = ptrace_do_wait(wo, tsk);
			if (retval)
				return retval;

			if (wo->wo_flags & __WNOTHREAD)
				break;
		} while_each_thread(current, tsk);
	}
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG))
		return -ERESTARTSYS;

	return retval;
}

static long do_wait(struct wait_opts *wo)
{
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		retval = __do_wait(wo);
		if (retval != -ERESTARTSYS)
			break;
		if (signal_pending(current))
			break;
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
			  struct waitid_info *infop, int options,
			  struct rusage *ru)
{
	unsigned int f_flags = 0;
	struct pid *pid = NULL;
	enum pid_type type;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo->wo_type	= type;
	wo->wo_pid	= pid;
	wo->wo_flags	= options;
	wo->wo_info	= infop;
	wo->wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo->wo_flags |= WNOHANG;

	return 0;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	long ret;

	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
	if (ret)
		return ret;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
		ret = -EAGAIN;

	put_pid(wo.wo_pid);
	return ret;
}
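
/*
 * Illustrative userspace use of the P_PIDFD branch handled in
 * kernel_waitid_prepare() (glibc wrapper and defines assumed):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	waitid((idtype_t)P_PIDFD, pidfd, &info, WEXITED);
 *
 * A pidfd opened with PIDFD_NONBLOCK carries O_NONBLOCK, which the code
 * above converts into an implicit WNOHANG.
 */
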
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/*
 * This needs to be __function_aligned as GCC implicitly makes any
 * implementation of abort() cold and drops alignment specified by
 * -falign-functions=N.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
 */
__weak __function_aligned void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);