1 /* 2 * linux/kernel/exit.c 3 * 4 * Copyright (C) 1991, 1992 Linus Torvalds 5 */ 6 7 #include <linux/config.h> 8 #include <linux/mm.h> 9 #include <linux/slab.h> 10 #include <linux/interrupt.h> 11 #include <linux/smp_lock.h> 12 #include <linux/module.h> 13 #include <linux/capability.h> 14 #include <linux/completion.h> 15 #include <linux/personality.h> 16 #include <linux/tty.h> 17 #include <linux/namespace.h> 18 #include <linux/key.h> 19 #include <linux/security.h> 20 #include <linux/cpu.h> 21 #include <linux/acct.h> 22 #include <linux/file.h> 23 #include <linux/binfmts.h> 24 #include <linux/ptrace.h> 25 #include <linux/profile.h> 26 #include <linux/mount.h> 27 #include <linux/proc_fs.h> 28 #include <linux/mempolicy.h> 29 #include <linux/cpuset.h> 30 #include <linux/syscalls.h> 31 #include <linux/signal.h> 32 #include <linux/posix-timers.h> 33 #include <linux/cn_proc.h> 34 #include <linux/mutex.h> 35 #include <linux/futex.h> 36 #include <linux/compat.h> 37 #include <linux/pipe_fs_i.h> 38 39 #include <asm/uaccess.h> 40 #include <asm/unistd.h> 41 #include <asm/pgtable.h> 42 #include <asm/mmu_context.h> 43 44 extern void sem_exit (void); 45 extern struct task_struct *child_reaper; 46 47 int getrusage(struct task_struct *, int, struct rusage __user *); 48 49 static void exit_mm(struct task_struct * tsk); 50 51 static void __unhash_process(struct task_struct *p) 52 { 53 nr_threads--; 54 detach_pid(p, PIDTYPE_PID); 55 if (thread_group_leader(p)) { 56 detach_pid(p, PIDTYPE_PGID); 57 detach_pid(p, PIDTYPE_SID); 58 59 list_del_rcu(&p->tasks); 60 __get_cpu_var(process_counts)--; 61 } 62 list_del_rcu(&p->thread_group); 63 remove_parent(p); 64 } 65 66 /* 67 * This function expects the tasklist_lock write-locked. 68 */ 69 static void __exit_signal(struct task_struct *tsk) 70 { 71 struct signal_struct *sig = tsk->signal; 72 struct sighand_struct *sighand; 73 74 BUG_ON(!sig); 75 BUG_ON(!atomic_read(&sig->count)); 76 77 rcu_read_lock(); 78 sighand = rcu_dereference(tsk->sighand); 79 spin_lock(&sighand->siglock); 80 81 posix_cpu_timers_exit(tsk); 82 if (atomic_dec_and_test(&sig->count)) 83 posix_cpu_timers_exit_group(tsk); 84 else { 85 /* 86 * If there is any task waiting for the group exit 87 * then notify it: 88 */ 89 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) { 90 wake_up_process(sig->group_exit_task); 91 sig->group_exit_task = NULL; 92 } 93 if (tsk == sig->curr_target) 94 sig->curr_target = next_thread(tsk); 95 /* 96 * Accumulate here the counters for all threads but the 97 * group leader as they die, so they can be added into 98 * the process-wide totals when those are taken. 99 * The group leader stays around as a zombie as long 100 * as there are other threads. When it gets reaped, 101 * the exit.c code will add its counts into these totals. 102 * We won't ever get here for the group leader, since it 103 * will have been the last reference on the signal_struct. 104 */ 105 sig->utime = cputime_add(sig->utime, tsk->utime); 106 sig->stime = cputime_add(sig->stime, tsk->stime); 107 sig->min_flt += tsk->min_flt; 108 sig->maj_flt += tsk->maj_flt; 109 sig->nvcsw += tsk->nvcsw; 110 sig->nivcsw += tsk->nivcsw; 111 sig->sched_time += tsk->sched_time; 112 sig = NULL; /* Marker for below. */ 113 } 114 115 __unhash_process(tsk); 116 117 tsk->signal = NULL; 118 tsk->sighand = NULL; 119 spin_unlock(&sighand->siglock); 120 rcu_read_unlock(); 121 122 __cleanup_sighand(sighand); 123 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 124 flush_sigqueue(&tsk->pending); 125 if (sig) { 126 flush_sigqueue(&sig->shared_pending); 127 __cleanup_signal(sig); 128 } 129 } 130 131 static void delayed_put_task_struct(struct rcu_head *rhp) 132 { 133 put_task_struct(container_of(rhp, struct task_struct, rcu)); 134 } 135 136 void release_task(struct task_struct * p) 137 { 138 int zap_leader; 139 task_t *leader; 140 struct dentry *proc_dentry; 141 142 repeat: 143 atomic_dec(&p->user->processes); 144 spin_lock(&p->proc_lock); 145 proc_dentry = proc_pid_unhash(p); 146 write_lock_irq(&tasklist_lock); 147 ptrace_unlink(p); 148 BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); 149 __exit_signal(p); 150 151 /* 152 * If we are the last non-leader member of the thread 153 * group, and the leader is zombie, then notify the 154 * group leader's parent process. (if it wants notification.) 155 */ 156 zap_leader = 0; 157 leader = p->group_leader; 158 if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { 159 BUG_ON(leader->exit_signal == -1); 160 do_notify_parent(leader, leader->exit_signal); 161 /* 162 * If we were the last child thread and the leader has 163 * exited already, and the leader's parent ignores SIGCHLD, 164 * then we are the one who should release the leader. 165 * 166 * do_notify_parent() will have marked it self-reaping in 167 * that case. 168 */ 169 zap_leader = (leader->exit_signal == -1); 170 } 171 172 sched_exit(p); 173 write_unlock_irq(&tasklist_lock); 174 spin_unlock(&p->proc_lock); 175 proc_pid_flush(proc_dentry); 176 release_thread(p); 177 call_rcu(&p->rcu, delayed_put_task_struct); 178 179 p = leader; 180 if (unlikely(zap_leader)) 181 goto repeat; 182 } 183 184 /* 185 * This checks not only the pgrp, but falls back on the pid if no 186 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly 187 * without this... 188 */ 189 int session_of_pgrp(int pgrp) 190 { 191 struct task_struct *p; 192 int sid = -1; 193 194 read_lock(&tasklist_lock); 195 do_each_task_pid(pgrp, PIDTYPE_PGID, p) { 196 if (p->signal->session > 0) { 197 sid = p->signal->session; 198 goto out; 199 } 200 } while_each_task_pid(pgrp, PIDTYPE_PGID, p); 201 p = find_task_by_pid(pgrp); 202 if (p) 203 sid = p->signal->session; 204 out: 205 read_unlock(&tasklist_lock); 206 207 return sid; 208 } 209 210 /* 211 * Determine if a process group is "orphaned", according to the POSIX 212 * definition in 2.2.2.52. Orphaned process groups are not to be affected 213 * by terminal-generated stop signals. Newly orphaned process groups are 214 * to receive a SIGHUP and a SIGCONT. 215 * 216 * "I ask you, have you ever known what it is to be an orphan?" 217 */ 218 static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) 219 { 220 struct task_struct *p; 221 int ret = 1; 222 223 do_each_task_pid(pgrp, PIDTYPE_PGID, p) { 224 if (p == ignored_task 225 || p->exit_state 226 || p->real_parent->pid == 1) 227 continue; 228 if (process_group(p->real_parent) != pgrp 229 && p->real_parent->signal->session == p->signal->session) { 230 ret = 0; 231 break; 232 } 233 } while_each_task_pid(pgrp, PIDTYPE_PGID, p); 234 return ret; /* (sighing) "Often!" */ 235 } 236 237 int is_orphaned_pgrp(int pgrp) 238 { 239 int retval; 240 241 read_lock(&tasklist_lock); 242 retval = will_become_orphaned_pgrp(pgrp, NULL); 243 read_unlock(&tasklist_lock); 244 245 return retval; 246 } 247 248 static int has_stopped_jobs(int pgrp) 249 { 250 int retval = 0; 251 struct task_struct *p; 252 253 do_each_task_pid(pgrp, PIDTYPE_PGID, p) { 254 if (p->state != TASK_STOPPED) 255 continue; 256 257 /* If p is stopped by a debugger on a signal that won't 258 stop it, then don't count p as stopped. This isn't 259 perfect but it's a good approximation. */ 260 if (unlikely (p->ptrace) 261 && p->exit_code != SIGSTOP 262 && p->exit_code != SIGTSTP 263 && p->exit_code != SIGTTOU 264 && p->exit_code != SIGTTIN) 265 continue; 266 267 retval = 1; 268 break; 269 } while_each_task_pid(pgrp, PIDTYPE_PGID, p); 270 return retval; 271 } 272 273 /** 274 * reparent_to_init - Reparent the calling kernel thread to the init task. 275 * 276 * If a kernel thread is launched as a result of a system call, or if 277 * it ever exits, it should generally reparent itself to init so that 278 * it is correctly cleaned up on exit. 279 * 280 * The various task state such as scheduling policy and priority may have 281 * been inherited from a user process, so we reset them to sane values here. 282 * 283 * NOTE that reparent_to_init() gives the caller full capabilities. 284 */ 285 static void reparent_to_init(void) 286 { 287 write_lock_irq(&tasklist_lock); 288 289 ptrace_unlink(current); 290 /* Reparent to init */ 291 remove_parent(current); 292 current->parent = child_reaper; 293 current->real_parent = child_reaper; 294 add_parent(current); 295 296 /* Set the exit signal to SIGCHLD so we signal init on exit */ 297 current->exit_signal = SIGCHLD; 298 299 if ((current->policy == SCHED_NORMAL || 300 current->policy == SCHED_BATCH) 301 && (task_nice(current) < 0)) 302 set_user_nice(current, 0); 303 /* cpus_allowed? */ 304 /* rt_priority? */ 305 /* signals? */ 306 security_task_reparent_to_init(current); 307 memcpy(current->signal->rlim, init_task.signal->rlim, 308 sizeof(current->signal->rlim)); 309 atomic_inc(&(INIT_USER->__count)); 310 write_unlock_irq(&tasklist_lock); 311 switch_uid(INIT_USER); 312 } 313 314 void __set_special_pids(pid_t session, pid_t pgrp) 315 { 316 struct task_struct *curr = current->group_leader; 317 318 if (curr->signal->session != session) { 319 detach_pid(curr, PIDTYPE_SID); 320 curr->signal->session = session; 321 attach_pid(curr, PIDTYPE_SID, session); 322 } 323 if (process_group(curr) != pgrp) { 324 detach_pid(curr, PIDTYPE_PGID); 325 curr->signal->pgrp = pgrp; 326 attach_pid(curr, PIDTYPE_PGID, pgrp); 327 } 328 } 329 330 void set_special_pids(pid_t session, pid_t pgrp) 331 { 332 write_lock_irq(&tasklist_lock); 333 __set_special_pids(session, pgrp); 334 write_unlock_irq(&tasklist_lock); 335 } 336 337 /* 338 * Let kernel threads use this to say that they 339 * allow a certain signal (since daemonize() will 340 * have disabled all of them by default). 341 */ 342 int allow_signal(int sig) 343 { 344 if (!valid_signal(sig) || sig < 1) 345 return -EINVAL; 346 347 spin_lock_irq(¤t->sighand->siglock); 348 sigdelset(¤t->blocked, sig); 349 if (!current->mm) { 350 /* Kernel threads handle their own signals. 351 Let the signal code know it'll be handled, so 352 that they don't get converted to SIGKILL or 353 just silently dropped */ 354 current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; 355 } 356 recalc_sigpending(); 357 spin_unlock_irq(¤t->sighand->siglock); 358 return 0; 359 } 360 361 EXPORT_SYMBOL(allow_signal); 362 363 int disallow_signal(int sig) 364 { 365 if (!valid_signal(sig) || sig < 1) 366 return -EINVAL; 367 368 spin_lock_irq(¤t->sighand->siglock); 369 sigaddset(¤t->blocked, sig); 370 recalc_sigpending(); 371 spin_unlock_irq(¤t->sighand->siglock); 372 return 0; 373 } 374 375 EXPORT_SYMBOL(disallow_signal); 376 377 /* 378 * Put all the gunge required to become a kernel thread without 379 * attached user resources in one place where it belongs. 380 */ 381 382 void daemonize(const char *name, ...) 383 { 384 va_list args; 385 struct fs_struct *fs; 386 sigset_t blocked; 387 388 va_start(args, name); 389 vsnprintf(current->comm, sizeof(current->comm), name, args); 390 va_end(args); 391 392 /* 393 * If we were started as result of loading a module, close all of the 394 * user space pages. We don't need them, and if we didn't close them 395 * they would be locked into memory. 396 */ 397 exit_mm(current); 398 399 set_special_pids(1, 1); 400 mutex_lock(&tty_mutex); 401 current->signal->tty = NULL; 402 mutex_unlock(&tty_mutex); 403 404 /* Block and flush all signals */ 405 sigfillset(&blocked); 406 sigprocmask(SIG_BLOCK, &blocked, NULL); 407 flush_signals(current); 408 409 /* Become as one with the init task */ 410 411 exit_fs(current); /* current->fs->count--; */ 412 fs = init_task.fs; 413 current->fs = fs; 414 atomic_inc(&fs->count); 415 exit_namespace(current); 416 current->namespace = init_task.namespace; 417 get_namespace(current->namespace); 418 exit_files(current); 419 current->files = init_task.files; 420 atomic_inc(¤t->files->count); 421 422 reparent_to_init(); 423 } 424 425 EXPORT_SYMBOL(daemonize); 426 427 static void close_files(struct files_struct * files) 428 { 429 int i, j; 430 struct fdtable *fdt; 431 432 j = 0; 433 434 /* 435 * It is safe to dereference the fd table without RCU or 436 * ->file_lock because this is the last reference to the 437 * files structure. 438 */ 439 fdt = files_fdtable(files); 440 for (;;) { 441 unsigned long set; 442 i = j * __NFDBITS; 443 if (i >= fdt->max_fdset || i >= fdt->max_fds) 444 break; 445 set = fdt->open_fds->fds_bits[j++]; 446 while (set) { 447 if (set & 1) { 448 struct file * file = xchg(&fdt->fd[i], NULL); 449 if (file) 450 filp_close(file, files); 451 } 452 i++; 453 set >>= 1; 454 } 455 } 456 } 457 458 struct files_struct *get_files_struct(struct task_struct *task) 459 { 460 struct files_struct *files; 461 462 task_lock(task); 463 files = task->files; 464 if (files) 465 atomic_inc(&files->count); 466 task_unlock(task); 467 468 return files; 469 } 470 471 void fastcall put_files_struct(struct files_struct *files) 472 { 473 struct fdtable *fdt; 474 475 if (atomic_dec_and_test(&files->count)) { 476 close_files(files); 477 /* 478 * Free the fd and fdset arrays if we expanded them. 479 * If the fdtable was embedded, pass files for freeing 480 * at the end of the RCU grace period. Otherwise, 481 * you can free files immediately. 482 */ 483 fdt = files_fdtable(files); 484 if (fdt == &files->fdtab) 485 fdt->free_files = files; 486 else 487 kmem_cache_free(files_cachep, files); 488 free_fdtable(fdt); 489 } 490 } 491 492 EXPORT_SYMBOL(put_files_struct); 493 494 static inline void __exit_files(struct task_struct *tsk) 495 { 496 struct files_struct * files = tsk->files; 497 498 if (files) { 499 task_lock(tsk); 500 tsk->files = NULL; 501 task_unlock(tsk); 502 put_files_struct(files); 503 } 504 } 505 506 void exit_files(struct task_struct *tsk) 507 { 508 __exit_files(tsk); 509 } 510 511 static inline void __put_fs_struct(struct fs_struct *fs) 512 { 513 /* No need to hold fs->lock if we are killing it */ 514 if (atomic_dec_and_test(&fs->count)) { 515 dput(fs->root); 516 mntput(fs->rootmnt); 517 dput(fs->pwd); 518 mntput(fs->pwdmnt); 519 if (fs->altroot) { 520 dput(fs->altroot); 521 mntput(fs->altrootmnt); 522 } 523 kmem_cache_free(fs_cachep, fs); 524 } 525 } 526 527 void put_fs_struct(struct fs_struct *fs) 528 { 529 __put_fs_struct(fs); 530 } 531 532 static inline void __exit_fs(struct task_struct *tsk) 533 { 534 struct fs_struct * fs = tsk->fs; 535 536 if (fs) { 537 task_lock(tsk); 538 tsk->fs = NULL; 539 task_unlock(tsk); 540 __put_fs_struct(fs); 541 } 542 } 543 544 void exit_fs(struct task_struct *tsk) 545 { 546 __exit_fs(tsk); 547 } 548 549 EXPORT_SYMBOL_GPL(exit_fs); 550 551 /* 552 * Turn us into a lazy TLB process if we 553 * aren't already.. 554 */ 555 static void exit_mm(struct task_struct * tsk) 556 { 557 struct mm_struct *mm = tsk->mm; 558 559 mm_release(tsk, mm); 560 if (!mm) 561 return; 562 /* 563 * Serialize with any possible pending coredump. 564 * We must hold mmap_sem around checking core_waiters 565 * and clearing tsk->mm. The core-inducing thread 566 * will increment core_waiters for each thread in the 567 * group with ->mm != NULL. 568 */ 569 down_read(&mm->mmap_sem); 570 if (mm->core_waiters) { 571 up_read(&mm->mmap_sem); 572 down_write(&mm->mmap_sem); 573 if (!--mm->core_waiters) 574 complete(mm->core_startup_done); 575 up_write(&mm->mmap_sem); 576 577 wait_for_completion(&mm->core_done); 578 down_read(&mm->mmap_sem); 579 } 580 atomic_inc(&mm->mm_count); 581 if (mm != tsk->active_mm) BUG(); 582 /* more a memory barrier than a real lock */ 583 task_lock(tsk); 584 tsk->mm = NULL; 585 up_read(&mm->mmap_sem); 586 enter_lazy_tlb(mm, current); 587 task_unlock(tsk); 588 mmput(mm); 589 } 590 591 static inline void choose_new_parent(task_t *p, task_t *reaper) 592 { 593 /* 594 * Make sure we're not reparenting to ourselves and that 595 * the parent is not a zombie. 596 */ 597 BUG_ON(p == reaper || reaper->exit_state); 598 p->real_parent = reaper; 599 } 600 601 static void reparent_thread(task_t *p, task_t *father, int traced) 602 { 603 /* We don't want people slaying init. */ 604 if (p->exit_signal != -1) 605 p->exit_signal = SIGCHLD; 606 607 if (p->pdeath_signal) 608 /* We already hold the tasklist_lock here. */ 609 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); 610 611 /* Move the child from its dying parent to the new one. */ 612 if (unlikely(traced)) { 613 /* Preserve ptrace links if someone else is tracing this child. */ 614 list_del_init(&p->ptrace_list); 615 if (p->parent != p->real_parent) 616 list_add(&p->ptrace_list, &p->real_parent->ptrace_children); 617 } else { 618 /* If this child is being traced, then we're the one tracing it 619 * anyway, so let go of it. 620 */ 621 p->ptrace = 0; 622 remove_parent(p); 623 p->parent = p->real_parent; 624 add_parent(p); 625 626 /* If we'd notified the old parent about this child's death, 627 * also notify the new parent. 628 */ 629 if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 && 630 thread_group_empty(p)) 631 do_notify_parent(p, p->exit_signal); 632 else if (p->state == TASK_TRACED) { 633 /* 634 * If it was at a trace stop, turn it into 635 * a normal stop since it's no longer being 636 * traced. 637 */ 638 ptrace_untrace(p); 639 } 640 } 641 642 /* 643 * process group orphan check 644 * Case ii: Our child is in a different pgrp 645 * than we are, and it was the only connection 646 * outside, so the child pgrp is now orphaned. 647 */ 648 if ((process_group(p) != process_group(father)) && 649 (p->signal->session == father->signal->session)) { 650 int pgrp = process_group(p); 651 652 if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) { 653 __kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp); 654 __kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp); 655 } 656 } 657 } 658 659 /* 660 * When we die, we re-parent all our children. 661 * Try to give them to another thread in our thread 662 * group, and if no such member exists, give it to 663 * the global child reaper process (ie "init") 664 */ 665 static void forget_original_parent(struct task_struct * father, 666 struct list_head *to_release) 667 { 668 struct task_struct *p, *reaper = father; 669 struct list_head *_p, *_n; 670 671 do { 672 reaper = next_thread(reaper); 673 if (reaper == father) { 674 reaper = child_reaper; 675 break; 676 } 677 } while (reaper->exit_state); 678 679 /* 680 * There are only two places where our children can be: 681 * 682 * - in our child list 683 * - in our ptraced child list 684 * 685 * Search them and reparent children. 686 */ 687 list_for_each_safe(_p, _n, &father->children) { 688 int ptrace; 689 p = list_entry(_p,struct task_struct,sibling); 690 691 ptrace = p->ptrace; 692 693 /* if father isn't the real parent, then ptrace must be enabled */ 694 BUG_ON(father != p->real_parent && !ptrace); 695 696 if (father == p->real_parent) { 697 /* reparent with a reaper, real father it's us */ 698 choose_new_parent(p, reaper); 699 reparent_thread(p, father, 0); 700 } else { 701 /* reparent ptraced task to its real parent */ 702 __ptrace_unlink (p); 703 if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 && 704 thread_group_empty(p)) 705 do_notify_parent(p, p->exit_signal); 706 } 707 708 /* 709 * if the ptraced child is a zombie with exit_signal == -1 710 * we must collect it before we exit, or it will remain 711 * zombie forever since we prevented it from self-reap itself 712 * while it was being traced by us, to be able to see it in wait4. 713 */ 714 if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1)) 715 list_add(&p->ptrace_list, to_release); 716 } 717 list_for_each_safe(_p, _n, &father->ptrace_children) { 718 p = list_entry(_p,struct task_struct,ptrace_list); 719 choose_new_parent(p, reaper); 720 reparent_thread(p, father, 1); 721 } 722 } 723 724 /* 725 * Send signals to all our closest relatives so that they know 726 * to properly mourn us.. 727 */ 728 static void exit_notify(struct task_struct *tsk) 729 { 730 int state; 731 struct task_struct *t; 732 struct list_head ptrace_dead, *_p, *_n; 733 734 if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT) 735 && !thread_group_empty(tsk)) { 736 /* 737 * This occurs when there was a race between our exit 738 * syscall and a group signal choosing us as the one to 739 * wake up. It could be that we are the only thread 740 * alerted to check for pending signals, but another thread 741 * should be woken now to take the signal since we will not. 742 * Now we'll wake all the threads in the group just to make 743 * sure someone gets all the pending signals. 744 */ 745 read_lock(&tasklist_lock); 746 spin_lock_irq(&tsk->sighand->siglock); 747 for (t = next_thread(tsk); t != tsk; t = next_thread(t)) 748 if (!signal_pending(t) && !(t->flags & PF_EXITING)) { 749 recalc_sigpending_tsk(t); 750 if (signal_pending(t)) 751 signal_wake_up(t, 0); 752 } 753 spin_unlock_irq(&tsk->sighand->siglock); 754 read_unlock(&tasklist_lock); 755 } 756 757 write_lock_irq(&tasklist_lock); 758 759 /* 760 * This does two things: 761 * 762 * A. Make init inherit all the child processes 763 * B. Check to see if any process groups have become orphaned 764 * as a result of our exiting, and if they have any stopped 765 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 766 */ 767 768 INIT_LIST_HEAD(&ptrace_dead); 769 forget_original_parent(tsk, &ptrace_dead); 770 BUG_ON(!list_empty(&tsk->children)); 771 BUG_ON(!list_empty(&tsk->ptrace_children)); 772 773 /* 774 * Check to see if any process groups have become orphaned 775 * as a result of our exiting, and if they have any stopped 776 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) 777 * 778 * Case i: Our father is in a different pgrp than we are 779 * and we were the only connection outside, so our pgrp 780 * is about to become orphaned. 781 */ 782 783 t = tsk->real_parent; 784 785 if ((process_group(t) != process_group(tsk)) && 786 (t->signal->session == tsk->signal->session) && 787 will_become_orphaned_pgrp(process_group(tsk), tsk) && 788 has_stopped_jobs(process_group(tsk))) { 789 __kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk)); 790 __kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk)); 791 } 792 793 /* Let father know we died 794 * 795 * Thread signals are configurable, but you aren't going to use 796 * that to send signals to arbitary processes. 797 * That stops right now. 798 * 799 * If the parent exec id doesn't match the exec id we saved 800 * when we started then we know the parent has changed security 801 * domain. 802 * 803 * If our self_exec id doesn't match our parent_exec_id then 804 * we have changed execution domain as these two values started 805 * the same after a fork. 806 * 807 */ 808 809 if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 && 810 ( tsk->parent_exec_id != t->self_exec_id || 811 tsk->self_exec_id != tsk->parent_exec_id) 812 && !capable(CAP_KILL)) 813 tsk->exit_signal = SIGCHLD; 814 815 816 /* If something other than our normal parent is ptracing us, then 817 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 818 * only has special meaning to our real parent. 819 */ 820 if (tsk->exit_signal != -1 && thread_group_empty(tsk)) { 821 int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD; 822 do_notify_parent(tsk, signal); 823 } else if (tsk->ptrace) { 824 do_notify_parent(tsk, SIGCHLD); 825 } 826 827 state = EXIT_ZOMBIE; 828 if (tsk->exit_signal == -1 && 829 (likely(tsk->ptrace == 0) || 830 unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT))) 831 state = EXIT_DEAD; 832 tsk->exit_state = state; 833 834 write_unlock_irq(&tasklist_lock); 835 836 list_for_each_safe(_p, _n, &ptrace_dead) { 837 list_del_init(_p); 838 t = list_entry(_p,struct task_struct,ptrace_list); 839 release_task(t); 840 } 841 842 /* If the process is dead, release it - nobody will wait for it */ 843 if (state == EXIT_DEAD) 844 release_task(tsk); 845 } 846 847 fastcall NORET_TYPE void do_exit(long code) 848 { 849 struct task_struct *tsk = current; 850 int group_dead; 851 852 profile_task_exit(tsk); 853 854 WARN_ON(atomic_read(&tsk->fs_excl)); 855 856 if (unlikely(in_interrupt())) 857 panic("Aiee, killing interrupt handler!"); 858 if (unlikely(!tsk->pid)) 859 panic("Attempted to kill the idle task!"); 860 if (unlikely(tsk == child_reaper)) 861 panic("Attempted to kill init!"); 862 863 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 864 current->ptrace_message = code; 865 ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP); 866 } 867 868 /* 869 * We're taking recursive faults here in do_exit. Safest is to just 870 * leave this task alone and wait for reboot. 871 */ 872 if (unlikely(tsk->flags & PF_EXITING)) { 873 printk(KERN_ALERT 874 "Fixing recursive fault but reboot is needed!\n"); 875 if (tsk->io_context) 876 exit_io_context(); 877 set_current_state(TASK_UNINTERRUPTIBLE); 878 schedule(); 879 } 880 881 tsk->flags |= PF_EXITING; 882 883 /* 884 * Make sure we don't try to process any timer firings 885 * while we are already exiting. 886 */ 887 tsk->it_virt_expires = cputime_zero; 888 tsk->it_prof_expires = cputime_zero; 889 tsk->it_sched_expires = 0; 890 891 if (unlikely(in_atomic())) 892 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 893 current->comm, current->pid, 894 preempt_count()); 895 896 acct_update_integrals(tsk); 897 if (tsk->mm) { 898 update_hiwater_rss(tsk->mm); 899 update_hiwater_vm(tsk->mm); 900 } 901 group_dead = atomic_dec_and_test(&tsk->signal->live); 902 if (group_dead) { 903 hrtimer_cancel(&tsk->signal->real_timer); 904 exit_itimers(tsk->signal); 905 acct_process(code); 906 } 907 if (unlikely(tsk->robust_list)) 908 exit_robust_list(tsk); 909 #ifdef CONFIG_COMPAT 910 if (unlikely(tsk->compat_robust_list)) 911 compat_exit_robust_list(tsk); 912 #endif 913 exit_mm(tsk); 914 915 exit_sem(tsk); 916 __exit_files(tsk); 917 __exit_fs(tsk); 918 exit_namespace(tsk); 919 exit_thread(); 920 cpuset_exit(tsk); 921 exit_keys(tsk); 922 923 if (group_dead && tsk->signal->leader) 924 disassociate_ctty(1); 925 926 module_put(task_thread_info(tsk)->exec_domain->module); 927 if (tsk->binfmt) 928 module_put(tsk->binfmt->module); 929 930 tsk->exit_code = code; 931 proc_exit_connector(tsk); 932 exit_notify(tsk); 933 #ifdef CONFIG_NUMA 934 mpol_free(tsk->mempolicy); 935 tsk->mempolicy = NULL; 936 #endif 937 /* 938 * If DEBUG_MUTEXES is on, make sure we are holding no locks: 939 */ 940 mutex_debug_check_no_locks_held(tsk); 941 942 if (tsk->io_context) 943 exit_io_context(); 944 945 if (tsk->splice_pipe) 946 __free_pipe_info(tsk->splice_pipe); 947 948 /* PF_DEAD causes final put_task_struct after we schedule. */ 949 preempt_disable(); 950 BUG_ON(tsk->flags & PF_DEAD); 951 tsk->flags |= PF_DEAD; 952 953 schedule(); 954 BUG(); 955 /* Avoid "noreturn function does return". */ 956 for (;;) ; 957 } 958 959 EXPORT_SYMBOL_GPL(do_exit); 960 961 NORET_TYPE void complete_and_exit(struct completion *comp, long code) 962 { 963 if (comp) 964 complete(comp); 965 966 do_exit(code); 967 } 968 969 EXPORT_SYMBOL(complete_and_exit); 970 971 asmlinkage long sys_exit(int error_code) 972 { 973 do_exit((error_code&0xff)<<8); 974 } 975 976 /* 977 * Take down every thread in the group. This is called by fatal signals 978 * as well as by sys_exit_group (below). 979 */ 980 NORET_TYPE void 981 do_group_exit(int exit_code) 982 { 983 BUG_ON(exit_code & 0x80); /* core dumps don't get here */ 984 985 if (current->signal->flags & SIGNAL_GROUP_EXIT) 986 exit_code = current->signal->group_exit_code; 987 else if (!thread_group_empty(current)) { 988 struct signal_struct *const sig = current->signal; 989 struct sighand_struct *const sighand = current->sighand; 990 spin_lock_irq(&sighand->siglock); 991 if (sig->flags & SIGNAL_GROUP_EXIT) 992 /* Another thread got here before we took the lock. */ 993 exit_code = sig->group_exit_code; 994 else { 995 sig->group_exit_code = exit_code; 996 zap_other_threads(current); 997 } 998 spin_unlock_irq(&sighand->siglock); 999 } 1000 1001 do_exit(exit_code); 1002 /* NOTREACHED */ 1003 } 1004 1005 /* 1006 * this kills every thread in the thread group. Note that any externally 1007 * wait4()-ing process will get the correct exit code - even if this 1008 * thread is not the thread group leader. 1009 */ 1010 asmlinkage void sys_exit_group(int error_code) 1011 { 1012 do_group_exit((error_code & 0xff) << 8); 1013 } 1014 1015 static int eligible_child(pid_t pid, int options, task_t *p) 1016 { 1017 if (pid > 0) { 1018 if (p->pid != pid) 1019 return 0; 1020 } else if (!pid) { 1021 if (process_group(p) != process_group(current)) 1022 return 0; 1023 } else if (pid != -1) { 1024 if (process_group(p) != -pid) 1025 return 0; 1026 } 1027 1028 /* 1029 * Do not consider detached threads that are 1030 * not ptraced: 1031 */ 1032 if (p->exit_signal == -1 && !p->ptrace) 1033 return 0; 1034 1035 /* Wait for all children (clone and not) if __WALL is set; 1036 * otherwise, wait for clone children *only* if __WCLONE is 1037 * set; otherwise, wait for non-clone children *only*. (Note: 1038 * A "clone" child here is one that reports to its parent 1039 * using a signal other than SIGCHLD.) */ 1040 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) 1041 && !(options & __WALL)) 1042 return 0; 1043 /* 1044 * Do not consider thread group leaders that are 1045 * in a non-empty thread group: 1046 */ 1047 if (current->tgid != p->tgid && delay_group_leader(p)) 1048 return 2; 1049 1050 if (security_task_wait(p)) 1051 return 0; 1052 1053 return 1; 1054 } 1055 1056 static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, 1057 int why, int status, 1058 struct siginfo __user *infop, 1059 struct rusage __user *rusagep) 1060 { 1061 int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; 1062 put_task_struct(p); 1063 if (!retval) 1064 retval = put_user(SIGCHLD, &infop->si_signo); 1065 if (!retval) 1066 retval = put_user(0, &infop->si_errno); 1067 if (!retval) 1068 retval = put_user((short)why, &infop->si_code); 1069 if (!retval) 1070 retval = put_user(pid, &infop->si_pid); 1071 if (!retval) 1072 retval = put_user(uid, &infop->si_uid); 1073 if (!retval) 1074 retval = put_user(status, &infop->si_status); 1075 if (!retval) 1076 retval = pid; 1077 return retval; 1078 } 1079 1080 /* 1081 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold 1082 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1083 * the lock and this task is uninteresting. If we return nonzero, we have 1084 * released the lock and the system call should return. 1085 */ 1086 static int wait_task_zombie(task_t *p, int noreap, 1087 struct siginfo __user *infop, 1088 int __user *stat_addr, struct rusage __user *ru) 1089 { 1090 unsigned long state; 1091 int retval; 1092 int status; 1093 1094 if (unlikely(noreap)) { 1095 pid_t pid = p->pid; 1096 uid_t uid = p->uid; 1097 int exit_code = p->exit_code; 1098 int why, status; 1099 1100 if (unlikely(p->exit_state != EXIT_ZOMBIE)) 1101 return 0; 1102 if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) 1103 return 0; 1104 get_task_struct(p); 1105 read_unlock(&tasklist_lock); 1106 if ((exit_code & 0x7f) == 0) { 1107 why = CLD_EXITED; 1108 status = exit_code >> 8; 1109 } else { 1110 why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; 1111 status = exit_code & 0x7f; 1112 } 1113 return wait_noreap_copyout(p, pid, uid, why, 1114 status, infop, ru); 1115 } 1116 1117 /* 1118 * Try to move the task's state to DEAD 1119 * only one thread is allowed to do this: 1120 */ 1121 state = xchg(&p->exit_state, EXIT_DEAD); 1122 if (state != EXIT_ZOMBIE) { 1123 BUG_ON(state != EXIT_DEAD); 1124 return 0; 1125 } 1126 if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) { 1127 /* 1128 * This can only happen in a race with a ptraced thread 1129 * dying on another processor. 1130 */ 1131 return 0; 1132 } 1133 1134 if (likely(p->real_parent == p->parent) && likely(p->signal)) { 1135 struct signal_struct *psig; 1136 struct signal_struct *sig; 1137 1138 /* 1139 * The resource counters for the group leader are in its 1140 * own task_struct. Those for dead threads in the group 1141 * are in its signal_struct, as are those for the child 1142 * processes it has previously reaped. All these 1143 * accumulate in the parent's signal_struct c* fields. 1144 * 1145 * We don't bother to take a lock here to protect these 1146 * p->signal fields, because they are only touched by 1147 * __exit_signal, which runs with tasklist_lock 1148 * write-locked anyway, and so is excluded here. We do 1149 * need to protect the access to p->parent->signal fields, 1150 * as other threads in the parent group can be right 1151 * here reaping other children at the same time. 1152 */ 1153 spin_lock_irq(&p->parent->sighand->siglock); 1154 psig = p->parent->signal; 1155 sig = p->signal; 1156 psig->cutime = 1157 cputime_add(psig->cutime, 1158 cputime_add(p->utime, 1159 cputime_add(sig->utime, 1160 sig->cutime))); 1161 psig->cstime = 1162 cputime_add(psig->cstime, 1163 cputime_add(p->stime, 1164 cputime_add(sig->stime, 1165 sig->cstime))); 1166 psig->cmin_flt += 1167 p->min_flt + sig->min_flt + sig->cmin_flt; 1168 psig->cmaj_flt += 1169 p->maj_flt + sig->maj_flt + sig->cmaj_flt; 1170 psig->cnvcsw += 1171 p->nvcsw + sig->nvcsw + sig->cnvcsw; 1172 psig->cnivcsw += 1173 p->nivcsw + sig->nivcsw + sig->cnivcsw; 1174 spin_unlock_irq(&p->parent->sighand->siglock); 1175 } 1176 1177 /* 1178 * Now we are sure this task is interesting, and no other 1179 * thread can reap it because we set its state to EXIT_DEAD. 1180 */ 1181 read_unlock(&tasklist_lock); 1182 1183 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 1184 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1185 ? p->signal->group_exit_code : p->exit_code; 1186 if (!retval && stat_addr) 1187 retval = put_user(status, stat_addr); 1188 if (!retval && infop) 1189 retval = put_user(SIGCHLD, &infop->si_signo); 1190 if (!retval && infop) 1191 retval = put_user(0, &infop->si_errno); 1192 if (!retval && infop) { 1193 int why; 1194 1195 if ((status & 0x7f) == 0) { 1196 why = CLD_EXITED; 1197 status >>= 8; 1198 } else { 1199 why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; 1200 status &= 0x7f; 1201 } 1202 retval = put_user((short)why, &infop->si_code); 1203 if (!retval) 1204 retval = put_user(status, &infop->si_status); 1205 } 1206 if (!retval && infop) 1207 retval = put_user(p->pid, &infop->si_pid); 1208 if (!retval && infop) 1209 retval = put_user(p->uid, &infop->si_uid); 1210 if (retval) { 1211 // TODO: is this safe? 1212 p->exit_state = EXIT_ZOMBIE; 1213 return retval; 1214 } 1215 retval = p->pid; 1216 if (p->real_parent != p->parent) { 1217 write_lock_irq(&tasklist_lock); 1218 /* Double-check with lock held. */ 1219 if (p->real_parent != p->parent) { 1220 __ptrace_unlink(p); 1221 // TODO: is this safe? 1222 p->exit_state = EXIT_ZOMBIE; 1223 /* 1224 * If this is not a detached task, notify the parent. 1225 * If it's still not detached after that, don't release 1226 * it now. 1227 */ 1228 if (p->exit_signal != -1) { 1229 do_notify_parent(p, p->exit_signal); 1230 if (p->exit_signal != -1) 1231 p = NULL; 1232 } 1233 } 1234 write_unlock_irq(&tasklist_lock); 1235 } 1236 if (p != NULL) 1237 release_task(p); 1238 BUG_ON(!retval); 1239 return retval; 1240 } 1241 1242 /* 1243 * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold 1244 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1245 * the lock and this task is uninteresting. If we return nonzero, we have 1246 * released the lock and the system call should return. 1247 */ 1248 static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, 1249 struct siginfo __user *infop, 1250 int __user *stat_addr, struct rusage __user *ru) 1251 { 1252 int retval, exit_code; 1253 1254 if (!p->exit_code) 1255 return 0; 1256 if (delayed_group_leader && !(p->ptrace & PT_PTRACED) && 1257 p->signal && p->signal->group_stop_count > 0) 1258 /* 1259 * A group stop is in progress and this is the group leader. 1260 * We won't report until all threads have stopped. 1261 */ 1262 return 0; 1263 1264 /* 1265 * Now we are pretty sure this task is interesting. 1266 * Make sure it doesn't get reaped out from under us while we 1267 * give up the lock and then examine it below. We don't want to 1268 * keep holding onto the tasklist_lock while we call getrusage and 1269 * possibly take page faults for user memory. 1270 */ 1271 get_task_struct(p); 1272 read_unlock(&tasklist_lock); 1273 1274 if (unlikely(noreap)) { 1275 pid_t pid = p->pid; 1276 uid_t uid = p->uid; 1277 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED; 1278 1279 exit_code = p->exit_code; 1280 if (unlikely(!exit_code) || 1281 unlikely(p->state & TASK_TRACED)) 1282 goto bail_ref; 1283 return wait_noreap_copyout(p, pid, uid, 1284 why, (exit_code << 8) | 0x7f, 1285 infop, ru); 1286 } 1287 1288 write_lock_irq(&tasklist_lock); 1289 1290 /* 1291 * This uses xchg to be atomic with the thread resuming and setting 1292 * it. It must also be done with the write lock held to prevent a 1293 * race with the EXIT_ZOMBIE case. 1294 */ 1295 exit_code = xchg(&p->exit_code, 0); 1296 if (unlikely(p->exit_state)) { 1297 /* 1298 * The task resumed and then died. Let the next iteration 1299 * catch it in EXIT_ZOMBIE. Note that exit_code might 1300 * already be zero here if it resumed and did _exit(0). 1301 * The task itself is dead and won't touch exit_code again; 1302 * other processors in this function are locked out. 1303 */ 1304 p->exit_code = exit_code; 1305 exit_code = 0; 1306 } 1307 if (unlikely(exit_code == 0)) { 1308 /* 1309 * Another thread in this function got to it first, or it 1310 * resumed, or it resumed and then died. 1311 */ 1312 write_unlock_irq(&tasklist_lock); 1313 bail_ref: 1314 put_task_struct(p); 1315 /* 1316 * We are returning to the wait loop without having successfully 1317 * removed the process and having released the lock. We cannot 1318 * continue, since the "p" task pointer is potentially stale. 1319 * 1320 * Return -EAGAIN, and do_wait() will restart the loop from the 1321 * beginning. Do _not_ re-acquire the lock. 1322 */ 1323 return -EAGAIN; 1324 } 1325 1326 /* move to end of parent's list to avoid starvation */ 1327 remove_parent(p); 1328 add_parent(p); 1329 1330 write_unlock_irq(&tasklist_lock); 1331 1332 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 1333 if (!retval && stat_addr) 1334 retval = put_user((exit_code << 8) | 0x7f, stat_addr); 1335 if (!retval && infop) 1336 retval = put_user(SIGCHLD, &infop->si_signo); 1337 if (!retval && infop) 1338 retval = put_user(0, &infop->si_errno); 1339 if (!retval && infop) 1340 retval = put_user((short)((p->ptrace & PT_PTRACED) 1341 ? CLD_TRAPPED : CLD_STOPPED), 1342 &infop->si_code); 1343 if (!retval && infop) 1344 retval = put_user(exit_code, &infop->si_status); 1345 if (!retval && infop) 1346 retval = put_user(p->pid, &infop->si_pid); 1347 if (!retval && infop) 1348 retval = put_user(p->uid, &infop->si_uid); 1349 if (!retval) 1350 retval = p->pid; 1351 put_task_struct(p); 1352 1353 BUG_ON(!retval); 1354 return retval; 1355 } 1356 1357 /* 1358 * Handle do_wait work for one task in a live, non-stopped state. 1359 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1360 * the lock and this task is uninteresting. If we return nonzero, we have 1361 * released the lock and the system call should return. 1362 */ 1363 static int wait_task_continued(task_t *p, int noreap, 1364 struct siginfo __user *infop, 1365 int __user *stat_addr, struct rusage __user *ru) 1366 { 1367 int retval; 1368 pid_t pid; 1369 uid_t uid; 1370 1371 if (unlikely(!p->signal)) 1372 return 0; 1373 1374 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1375 return 0; 1376 1377 spin_lock_irq(&p->sighand->siglock); 1378 /* Re-check with the lock held. */ 1379 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { 1380 spin_unlock_irq(&p->sighand->siglock); 1381 return 0; 1382 } 1383 if (!noreap) 1384 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1385 spin_unlock_irq(&p->sighand->siglock); 1386 1387 pid = p->pid; 1388 uid = p->uid; 1389 get_task_struct(p); 1390 read_unlock(&tasklist_lock); 1391 1392 if (!infop) { 1393 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 1394 put_task_struct(p); 1395 if (!retval && stat_addr) 1396 retval = put_user(0xffff, stat_addr); 1397 if (!retval) 1398 retval = p->pid; 1399 } else { 1400 retval = wait_noreap_copyout(p, pid, uid, 1401 CLD_CONTINUED, SIGCONT, 1402 infop, ru); 1403 BUG_ON(retval == 0); 1404 } 1405 1406 return retval; 1407 } 1408 1409 1410 static inline int my_ptrace_child(struct task_struct *p) 1411 { 1412 if (!(p->ptrace & PT_PTRACED)) 1413 return 0; 1414 if (!(p->ptrace & PT_ATTACHED)) 1415 return 1; 1416 /* 1417 * This child was PTRACE_ATTACH'd. We should be seeing it only if 1418 * we are the attacher. If we are the real parent, this is a race 1419 * inside ptrace_attach. It is waiting for the tasklist_lock, 1420 * which we have to switch the parent links, but has already set 1421 * the flags in p->ptrace. 1422 */ 1423 return (p->parent != p->real_parent); 1424 } 1425 1426 static long do_wait(pid_t pid, int options, struct siginfo __user *infop, 1427 int __user *stat_addr, struct rusage __user *ru) 1428 { 1429 DECLARE_WAITQUEUE(wait, current); 1430 struct task_struct *tsk; 1431 int flag, retval; 1432 1433 add_wait_queue(¤t->signal->wait_chldexit,&wait); 1434 repeat: 1435 /* 1436 * We will set this flag if we see any child that might later 1437 * match our criteria, even if we are not able to reap it yet. 1438 */ 1439 flag = 0; 1440 current->state = TASK_INTERRUPTIBLE; 1441 read_lock(&tasklist_lock); 1442 tsk = current; 1443 do { 1444 struct task_struct *p; 1445 struct list_head *_p; 1446 int ret; 1447 1448 list_for_each(_p,&tsk->children) { 1449 p = list_entry(_p,struct task_struct,sibling); 1450 1451 ret = eligible_child(pid, options, p); 1452 if (!ret) 1453 continue; 1454 1455 switch (p->state) { 1456 case TASK_TRACED: 1457 /* 1458 * When we hit the race with PTRACE_ATTACH, 1459 * we will not report this child. But the 1460 * race means it has not yet been moved to 1461 * our ptrace_children list, so we need to 1462 * set the flag here to avoid a spurious ECHILD 1463 * when the race happens with the only child. 1464 */ 1465 flag = 1; 1466 if (!my_ptrace_child(p)) 1467 continue; 1468 /*FALLTHROUGH*/ 1469 case TASK_STOPPED: 1470 /* 1471 * It's stopped now, so it might later 1472 * continue, exit, or stop again. 1473 */ 1474 flag = 1; 1475 if (!(options & WUNTRACED) && 1476 !my_ptrace_child(p)) 1477 continue; 1478 retval = wait_task_stopped(p, ret == 2, 1479 (options & WNOWAIT), 1480 infop, 1481 stat_addr, ru); 1482 if (retval == -EAGAIN) 1483 goto repeat; 1484 if (retval != 0) /* He released the lock. */ 1485 goto end; 1486 break; 1487 default: 1488 // case EXIT_DEAD: 1489 if (p->exit_state == EXIT_DEAD) 1490 continue; 1491 // case EXIT_ZOMBIE: 1492 if (p->exit_state == EXIT_ZOMBIE) { 1493 /* 1494 * Eligible but we cannot release 1495 * it yet: 1496 */ 1497 if (ret == 2) 1498 goto check_continued; 1499 if (!likely(options & WEXITED)) 1500 continue; 1501 retval = wait_task_zombie( 1502 p, (options & WNOWAIT), 1503 infop, stat_addr, ru); 1504 /* He released the lock. */ 1505 if (retval != 0) 1506 goto end; 1507 break; 1508 } 1509 check_continued: 1510 /* 1511 * It's running now, so it might later 1512 * exit, stop, or stop and then continue. 1513 */ 1514 flag = 1; 1515 if (!unlikely(options & WCONTINUED)) 1516 continue; 1517 retval = wait_task_continued( 1518 p, (options & WNOWAIT), 1519 infop, stat_addr, ru); 1520 if (retval != 0) /* He released the lock. */ 1521 goto end; 1522 break; 1523 } 1524 } 1525 if (!flag) { 1526 list_for_each(_p, &tsk->ptrace_children) { 1527 p = list_entry(_p, struct task_struct, 1528 ptrace_list); 1529 if (!eligible_child(pid, options, p)) 1530 continue; 1531 flag = 1; 1532 break; 1533 } 1534 } 1535 if (options & __WNOTHREAD) 1536 break; 1537 tsk = next_thread(tsk); 1538 if (tsk->signal != current->signal) 1539 BUG(); 1540 } while (tsk != current); 1541 1542 read_unlock(&tasklist_lock); 1543 if (flag) { 1544 retval = 0; 1545 if (options & WNOHANG) 1546 goto end; 1547 retval = -ERESTARTSYS; 1548 if (signal_pending(current)) 1549 goto end; 1550 schedule(); 1551 goto repeat; 1552 } 1553 retval = -ECHILD; 1554 end: 1555 current->state = TASK_RUNNING; 1556 remove_wait_queue(¤t->signal->wait_chldexit,&wait); 1557 if (infop) { 1558 if (retval > 0) 1559 retval = 0; 1560 else { 1561 /* 1562 * For a WNOHANG return, clear out all the fields 1563 * we would set so the user can easily tell the 1564 * difference. 1565 */ 1566 if (!retval) 1567 retval = put_user(0, &infop->si_signo); 1568 if (!retval) 1569 retval = put_user(0, &infop->si_errno); 1570 if (!retval) 1571 retval = put_user(0, &infop->si_code); 1572 if (!retval) 1573 retval = put_user(0, &infop->si_pid); 1574 if (!retval) 1575 retval = put_user(0, &infop->si_uid); 1576 if (!retval) 1577 retval = put_user(0, &infop->si_status); 1578 } 1579 } 1580 return retval; 1581 } 1582 1583 asmlinkage long sys_waitid(int which, pid_t pid, 1584 struct siginfo __user *infop, int options, 1585 struct rusage __user *ru) 1586 { 1587 long ret; 1588 1589 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) 1590 return -EINVAL; 1591 if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) 1592 return -EINVAL; 1593 1594 switch (which) { 1595 case P_ALL: 1596 pid = -1; 1597 break; 1598 case P_PID: 1599 if (pid <= 0) 1600 return -EINVAL; 1601 break; 1602 case P_PGID: 1603 if (pid <= 0) 1604 return -EINVAL; 1605 pid = -pid; 1606 break; 1607 default: 1608 return -EINVAL; 1609 } 1610 1611 ret = do_wait(pid, options, infop, NULL, ru); 1612 1613 /* avoid REGPARM breakage on x86: */ 1614 prevent_tail_call(ret); 1615 return ret; 1616 } 1617 1618 asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, 1619 int options, struct rusage __user *ru) 1620 { 1621 long ret; 1622 1623 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1624 __WNOTHREAD|__WCLONE|__WALL)) 1625 return -EINVAL; 1626 ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru); 1627 1628 /* avoid REGPARM breakage on x86: */ 1629 prevent_tail_call(ret); 1630 return ret; 1631 } 1632 1633 #ifdef __ARCH_WANT_SYS_WAITPID 1634 1635 /* 1636 * sys_waitpid() remains for compatibility. waitpid() should be 1637 * implemented by calling sys_wait4() from libc.a. 1638 */ 1639 asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options) 1640 { 1641 return sys_wait4(pid, stat_addr, options, NULL); 1642 } 1643 1644 #endif 1645