/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_global_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}
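
/*
 * A concrete illustration of the rule above (a sketch, not code from
 * this file): suppose a login shell in pgrp A starts a pipeline in
 * pgrp B, both in session S.  While the shell lives, every member of
 * B has a parent (the shell) in the same session but a different
 * pgrp, so B is not orphaned.  When the shell exits, no member of B
 * has such a parent any more, B becomes orphaned, and if it contains
 * stopped jobs it is sent SIGHUP and then SIGCONT.
 */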

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (task_session_nr(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_task_session(curr, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (task_pgrp_nr(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		set_task_pgrp(curr, pgrp);
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
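
/*
 * Minimal usage sketch (the caller is hypothetical, not part of this
 * file): a daemonized kernel thread that wants to be killable unblocks
 * exactly one signal and then polls for it itself:
 *
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		schedule_timeout_interruptible(HZ);
 *
 * The magic handler value 2 set above merely tells the signal code that
 * the thread will consume the signal itself, so it is neither promoted
 * to a forced SIGKILL nor silently discarded.
 */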

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= PF_NOFREEZE;

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	if (current->nsproxy != init_task.nsproxy) {
		get_nsproxy(init_task.nsproxy);
		switch_task_namespaces(current, init_task.nsproxy);
	}

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
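
/*
 * The classic pattern of this era (an illustrative sketch; my_loop is
 * hypothetical): a module spawns a thread with kernel_thread(), and the
 * thread sheds its inherited user-space context first thing:
 *
 *	static int my_loop(void *unused)
 *	{
 *		daemonize("my_loop/%d", 0);
 *		... main loop ...
 *		return 0;
 *	}
 *
 *	kernel_thread(my_loop, NULL, CLONE_FS | CLONE_FILES);
 *
 * New code should prefer the kthread_create()/kthread_run() API, which
 * never inherits user resources in the first place.
 */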

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);
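
/*
 * get_files_struct()/put_files_struct() form a reference-count pair.
 * A minimal sketch of a safe access from another context (the caller
 * and what it does with the table are hypothetical):
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		... inspect files_fdtable(files) under rcu_read_lock() ...
 *		put_files_struct(files);
 *	}
 *
 * The final put (count reaching zero) is what triggers close_files()
 * and the fdtable teardown above.
 */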

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mmput(mm);
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (task_is_traced(p)) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent there is no need to
	 * notify anyone that anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give them to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper = father;
	struct list_head ptrace_dead;

	INIT_LIST_HEAD(&ptrace_dead);

	write_lock_irq(&tasklist_lock);

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = task_child_reaper(father);
			break;
		}
	} while (reaper->flags & PF_EXITING);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_entry_safe(p, n, &father->children, sibling) {
		int ptrace;

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent to the reaper; we are the real parent */
			p->real_parent = reaper;
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from reaping itself
		 * while we were tracing it, precisely so that we could see
		 * it in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, &ptrace_dead);
	}

	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
		p->real_parent = reaper;
		reparent_thread(p, father, 1);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&father->children));
	BUG_ON(!list_empty(&father->ptrace_children));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
		list_del_init(&p->ptrace_list);
		release_task(p);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */
	t = tsk->real_parent;

	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	if (thread_group_leader(tsk) &&
	    tsk->signal->notify_count < 0 &&
	    tsk->signal->group_exit_task)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long *n = end_of_stack(current);
	unsigned long free;

	while (*n == 0)
		n++;
	free = (unsigned long)n - (unsigned long)end_of_stack(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static inline void exit_child_reaper(struct task_struct *tsk)
{
	if (likely(tsk->group_leader != task_child_reaper(tsk)))
		return;

	if (tsk->nsproxy->pid_ns == &init_pid_ns)
		panic("Attempted to kill init!");

	/*
	 * @tsk is the last thread in the 'cgroup-init' and is exiting.
	 * Terminate all remaining processes in the namespace and reap them
	 * before exiting @tsk.
	 *
	 * Note that @tsk (last thread of cgroup-init) may not necessarily
	 * be the child-reaper (i.e. main thread of cgroup-init) of the
	 * namespace, i.e. the child_reaper may have already exited.
	 *
	 * Even after a child_reaper exits, we let it inherit orphaned children,
	 * because pid_ns->child_reaper remains valid as long as there is
	 * at least one living sub-thread in the cgroup init.
	 *
	 * This living sub-thread of the cgroup-init will be notified when
	 * a child inherited by the 'child-reaper' exits (do_notify_parent()
	 * uses __group_send_sig_info()). Further, when reaping child processes,
	 * do_wait() iterates over children of all living sub-threads.
	 *
	 * I.e. even though the 'child_reaper' thread is listed as the parent
	 * of the orphaned children, any living sub-thread in the cgroup-init
	 * can perform the role of the child_reaper.
	 */
	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		exit_child_reaper(tsk);
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
#ifdef CONFIG_FUTEX
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
#endif
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}
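
/*
 * A worked example of the encoding above: an _exit(1) in user space
 * arrives here as error_code == 1 and is stored as (1 & 0xff) << 8 ==
 * 0x100 in tsk->exit_code.  The low byte is reserved for the signal
 * number when a task is killed, which is exactly the layout the
 * WIFEXITED()/WEXITSTATUS() macros decode in user space.
 */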

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

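/*
 * The pid argument follows the wait4() convention:
 *
 *	pid > 0		wait for the child with that pid
 *	pid == 0	wait for any child in our process group
 *	pid < -1	wait for any child in process group -pid
 *	pid == -1	wait for any child at all
 *
 * eligible_child() below encodes exactly these four cases.
 */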
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;
	struct pid_namespace *ns;

	ns = current->nsproxy->pid_ns;
	if (pid > 0) {
		if (task_pid_nr_ns(p, ns) != pid)
			return 0;
	} else if (!pid) {
		if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
			return 0;
	} else if (pid != -1) {
		if (task_pgrp_nr_ns(p, ns) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	struct pid_namespace *ns;

	ns = current->nsproxy->pid_ns;

	if (unlikely(noreap)) {
		pid_t pid = task_pid_nr_ns(p, ns);
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	/* traced means p->ptrace, but not vice versa */
	traced = (p->real_parent != p->parent);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = task_pid_nr_ns(p, ns);

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (p->exit_signal != -1) {
			do_notify_parent(p, p->exit_signal);
			if (p->exit_signal != -1) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;
	pid_t pid;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) || unlikely(p->exit_state))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, exit_code,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock. We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning. Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;
	struct pid_namespace *ns;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	ns = current->nsproxy->pid_ns;
	pid = task_pid_nr_ns(p, ns);
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = task_pid_nr_ns(p, ns);
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * under which we have to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;
	int allowed, denied;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	allowed = denied = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		int ret;

		list_for_each_entry(p, &tsk->children, sibling) {
			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			if (unlikely(ret < 0)) {
				denied = ret;
				continue;
			}
			allowed = 1;

			if (task_is_stopped_or_traced(p)) {
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 *
				 * When we hit the race with PTRACE_ATTACH, we
				 * will not report this child.  But the race
				 * means it has not yet been moved to our
				 * ptrace_children list, so we need to set the
				 * flag here to avoid a spurious ECHILD when
				 * the race happens with the only child.
				 */
				flag = 1;

				if (!my_ptrace_child(p)) {
					if (task_is_traced(p))
						continue;
					if (!(options & WUNTRACED))
						continue;
				}

				retval = wait_task_stopped(p, ret == 2,
						(options & WNOWAIT), infop,
						stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
			} else if (p->exit_state == EXIT_DEAD) {
				continue;
			} else if (p->exit_state == EXIT_ZOMBIE) {
				/*
				 * Eligible but we cannot release it yet:
				 */
				if (ret == 2)
					goto check_continued;
				if (!likely(options & WEXITED))
					continue;
				retval = wait_task_zombie(p,
						(options & WNOWAIT), infop,
						stat_addr, ru);
				/* He released the lock.  */
				if (retval != 0)
					goto end;
			} else {
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(p,
						(options & WNOWAIT), infop,
						stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
			}
		}
		if (!flag) {
			list_for_each_entry(p, &tsk->ptrace_children,
					    ptrace_list) {
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
	if (unlikely(denied) && !allowed)
		retval = denied;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
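
/*
 * From user space the status word filled in above is decoded with the
 * standard macros (an illustrative sketch, not kernel code):
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, 0, NULL);
 *	if (WIFEXITED(status))
 *		printf("%d exited with %d\n", pid, WEXITSTATUS(status));
 *	else if (WIFSIGNALED(status))
 *		printf("%d killed by signal %d\n", pid, WTERMSIG(status));
 */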

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif