xref: /linux/kernel/exit.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
1 /*
2  *  linux/kernel/exit.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/capability.h>
12 #include <linux/completion.h>
13 #include <linux/personality.h>
14 #include <linux/tty.h>
15 #include <linux/mnt_namespace.h>
16 #include <linux/key.h>
17 #include <linux/security.h>
18 #include <linux/cpu.h>
19 #include <linux/acct.h>
20 #include <linux/tsacct_kern.h>
21 #include <linux/file.h>
22 #include <linux/binfmts.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pid_namespace.h>
25 #include <linux/ptrace.h>
26 #include <linux/profile.h>
27 #include <linux/mount.h>
28 #include <linux/proc_fs.h>
29 #include <linux/kthread.h>
30 #include <linux/mempolicy.h>
31 #include <linux/taskstats_kern.h>
32 #include <linux/delayacct.h>
33 #include <linux/freezer.h>
34 #include <linux/cgroup.h>
35 #include <linux/syscalls.h>
36 #include <linux/signal.h>
37 #include <linux/posix-timers.h>
38 #include <linux/cn_proc.h>
39 #include <linux/mutex.h>
40 #include <linux/futex.h>
41 #include <linux/compat.h>
42 #include <linux/pipe_fs_i.h>
43 #include <linux/audit.h> /* for audit_free() */
44 #include <linux/resource.h>
45 #include <linux/blkdev.h>
46 #include <linux/task_io_accounting_ops.h>
47 
48 #include <asm/uaccess.h>
49 #include <asm/unistd.h>
50 #include <asm/pgtable.h>
51 #include <asm/mmu_context.h>
52 
53 static void exit_mm(struct task_struct * tsk);
54 
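/*
 * Detach @p from the pid hashes and the task/thread lists.  Callers run
 * with tasklist_lock write-locked and the sighand lock held, as
 * __exit_signal() below does.
 */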
55 static void __unhash_process(struct task_struct *p)
56 {
57 	nr_threads--;
58 	detach_pid(p, PIDTYPE_PID);
59 	if (thread_group_leader(p)) {
60 		detach_pid(p, PIDTYPE_PGID);
61 		detach_pid(p, PIDTYPE_SID);
62 
63 		list_del_rcu(&p->tasks);
64 		__get_cpu_var(process_counts)--;
65 	}
66 	list_del_rcu(&p->thread_group);
67 	remove_parent(p);
68 }
69 
70 /*
71  * This function expects the tasklist_lock write-locked.
72  */
73 static void __exit_signal(struct task_struct *tsk)
74 {
75 	struct signal_struct *sig = tsk->signal;
76 	struct sighand_struct *sighand;
77 
78 	BUG_ON(!sig);
79 	BUG_ON(!atomic_read(&sig->count));
80 
81 	rcu_read_lock();
82 	sighand = rcu_dereference(tsk->sighand);
83 	spin_lock(&sighand->siglock);
84 
85 	posix_cpu_timers_exit(tsk);
86 	if (atomic_dec_and_test(&sig->count))
87 		posix_cpu_timers_exit_group(tsk);
88 	else {
89 		/*
90 		 * If there is any task waiting for the group exit
91 		 * then notify it:
92 		 */
93 		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
94 			wake_up_process(sig->group_exit_task);
95 
96 		if (tsk == sig->curr_target)
97 			sig->curr_target = next_thread(tsk);
98 		/*
99 		 * Accumulate here the counters for all threads but the
100 		 * group leader as they die, so they can be added into
101 		 * the process-wide totals when those are taken.
102 		 * The group leader stays around as a zombie as long
103 		 * as there are other threads.  When it gets reaped,
104 		 * the exit.c code will add its counts into these totals.
105 		 * We won't ever get here for the group leader, since it
106 		 * will have been the last reference on the signal_struct.
107 		 */
108 		sig->utime = cputime_add(sig->utime, tsk->utime);
109 		sig->stime = cputime_add(sig->stime, tsk->stime);
110 		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
111 		sig->min_flt += tsk->min_flt;
112 		sig->maj_flt += tsk->maj_flt;
113 		sig->nvcsw += tsk->nvcsw;
114 		sig->nivcsw += tsk->nivcsw;
115 		sig->inblock += task_io_get_inblock(tsk);
116 		sig->oublock += task_io_get_oublock(tsk);
117 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
118 		sig = NULL; /* Marker for below. */
119 	}
120 
121 	__unhash_process(tsk);
122 
123 	tsk->signal = NULL;
124 	tsk->sighand = NULL;
125 	spin_unlock(&sighand->siglock);
126 	rcu_read_unlock();
127 
128 	__cleanup_sighand(sighand);
129 	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
130 	flush_sigqueue(&tsk->pending);
131 	if (sig) {
132 		flush_sigqueue(&sig->shared_pending);
133 		taskstats_tgid_free(sig);
134 		__cleanup_signal(sig);
135 	}
136 }
137 
138 static void delayed_put_task_struct(struct rcu_head *rhp)
139 {
140 	put_task_struct(container_of(rhp, struct task_struct, rcu));
141 }
142 
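/*
 * release_task - final reap of an exited task: unhash it, fold its
 * accounting into the signal_struct, and free the task_struct after an
 * RCU grace period.  May repeat for the group leader if we were the
 * last thread keeping a zombie leader around.
 */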
143 void release_task(struct task_struct * p)
144 {
145 	struct task_struct *leader;
146 	int zap_leader;
147 repeat:
148 	atomic_dec(&p->user->processes);
149 	proc_flush_task(p);
150 	write_lock_irq(&tasklist_lock);
151 	ptrace_unlink(p);
152 	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
153 	__exit_signal(p);
154 
155 	/*
156 	 * If we are the last non-leader member of the thread
157 	 * group, and the leader is a zombie, then notify the
158 	 * group leader's parent process (if it wants notification).
159 	 */
160 	zap_leader = 0;
161 	leader = p->group_leader;
162 	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
163 		BUG_ON(leader->exit_signal == -1);
164 		do_notify_parent(leader, leader->exit_signal);
165 		/*
166 		 * If we were the last child thread and the leader has
167 		 * exited already, and the leader's parent ignores SIGCHLD,
168 		 * then we are the one who should release the leader.
169 		 *
170 		 * do_notify_parent() will have marked it self-reaping in
171 		 * that case.
172 		 */
173 		zap_leader = (leader->exit_signal == -1);
174 	}
175 
176 	write_unlock_irq(&tasklist_lock);
177 	release_thread(p);
178 	call_rcu(&p->rcu, delayed_put_task_struct);
179 
180 	p = leader;
181 	if (unlikely(zap_leader))
182 		goto repeat;
183 }
184 
185 /*
186  * This checks not only the pgrp, but falls back on the pid if no
187  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
188  * without this...
189  *
190  * The caller must hold rcu lock or the tasklist lock.
191  */
192 struct pid *session_of_pgrp(struct pid *pgrp)
193 {
194 	struct task_struct *p;
195 	struct pid *sid = NULL;
196 
197 	p = pid_task(pgrp, PIDTYPE_PGID);
198 	if (p == NULL)
199 		p = pid_task(pgrp, PIDTYPE_PID);
200 	if (p != NULL)
201 		sid = task_session(p);
202 
203 	return sid;
204 }
205 
206 /*
207  * Determine if a process group is "orphaned", according to the POSIX
208  * definition in 2.2.2.52.  Orphaned process groups are not to be affected
209  * by terminal-generated stop signals.  Newly orphaned process groups are
210  * to receive a SIGHUP and a SIGCONT.
211  *
212  * "I ask you, have you ever known what it is to be an orphan?"
213  */
214 static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
215 {
216 	struct task_struct *p;
217 	int ret = 1;
218 
219 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
220 		if (p == ignored_task
221 				|| p->exit_state
222 				|| is_global_init(p->real_parent))
223 			continue;
224 		if (task_pgrp(p->real_parent) != pgrp &&
225 		    task_session(p->real_parent) == task_session(p)) {
226 			ret = 0;
227 			break;
228 		}
229 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
230 	return ret;	/* (sighing) "Often!" */
231 }
232 
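/*
 * Concrete case of the check above: a process group becomes orphaned
 * once no live member has a parent outside the pgrp but inside the same
 * session, i.e. nobody is left to forward job-control signals to it.
 */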
233 int is_current_pgrp_orphaned(void)
234 {
235 	int retval;
236 
237 	read_lock(&tasklist_lock);
238 	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
239 	read_unlock(&tasklist_lock);
240 
241 	return retval;
242 }
243 
244 static int has_stopped_jobs(struct pid *pgrp)
245 {
246 	int retval = 0;
247 	struct task_struct *p;
248 
249 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
250 		if (!task_is_stopped(p))
251 			continue;
252 		retval = 1;
253 		break;
254 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
255 	return retval;
256 }
257 
258 /**
259  * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
260  *
261  * If a kernel thread is launched as a result of a system call, or if
262  * it ever exits, it should generally reparent itself to kthreadd so it
263  * isn't in the way of other processes and is correctly cleaned up on exit.
264  *
265  * Task state such as the scheduling policy and priority may have been
266  * inherited from a user process, so we reset it to sane values here.
267  *
268  * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
269  */
270 static void reparent_to_kthreadd(void)
271 {
272 	write_lock_irq(&tasklist_lock);
273 
274 	ptrace_unlink(current);
275 	/* Reparent to kthreadd */
276 	remove_parent(current);
277 	current->real_parent = current->parent = kthreadd_task;
278 	add_parent(current);
279 
280 	/* Set the exit signal to SIGCHLD so we signal kthreadd on exit */
281 	current->exit_signal = SIGCHLD;
282 
283 	if (task_nice(current) < 0)
284 		set_user_nice(current, 0);
285 	/* cpus_allowed? */
286 	/* rt_priority? */
287 	/* signals? */
288 	security_task_reparent_to_init(current);
289 	memcpy(current->signal->rlim, init_task.signal->rlim,
290 	       sizeof(current->signal->rlim));
291 	atomic_inc(&(INIT_USER->__count));
292 	write_unlock_irq(&tasklist_lock);
293 	switch_uid(INIT_USER);
294 }
295 
296 void __set_special_pids(struct pid *pid)
297 {
298 	struct task_struct *curr = current->group_leader;
299 	pid_t nr = pid_nr(pid);
300 
301 	if (task_session(curr) != pid) {
302 		detach_pid(curr, PIDTYPE_SID);
303 		attach_pid(curr, PIDTYPE_SID, pid);
304 		set_task_session(curr, nr);
305 	}
306 	if (task_pgrp(curr) != pid) {
307 		detach_pid(curr, PIDTYPE_PGID);
308 		attach_pid(curr, PIDTYPE_PGID, pid);
309 		set_task_pgrp(curr, nr);
310 	}
311 }
312 
313 static void set_special_pids(struct pid *pid)
314 {
315 	write_lock_irq(&tasklist_lock);
316 	__set_special_pids(pid);
317 	write_unlock_irq(&tasklist_lock);
318 }
319 
320 /*
321  * Let kernel threads use this to say that they
322  * allow a certain signal (since daemonize() will
323  * have disabled all of them by default).
324  */
325 int allow_signal(int sig)
326 {
327 	if (!valid_signal(sig) || sig < 1)
328 		return -EINVAL;
329 
330 	spin_lock_irq(&current->sighand->siglock);
331 	sigdelset(&current->blocked, sig);
332 	if (!current->mm) {
333 		/* Kernel threads handle their own signals.  Let the
334 		 * signal code know it'll be handled, so that it doesn't
335 		 * get converted to SIGKILL or just silently dropped.
336 		 */
337 		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
338 	}
339 	recalc_sigpending();
340 	spin_unlock_irq(&current->sighand->siglock);
341 	return 0;
342 }
343 
344 EXPORT_SYMBOL(allow_signal);
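
/*
 * Illustrative sketch of allow_signal() usage (not compiled here;
 * example_thread and the kthread_run() call are hypothetical): a kernel
 * thread unblocks SIGKILL so user space can ask it to stop.
 */
#if 0
static int example_thread(void *unused)
{
	allow_signal(SIGKILL);	/* kernel threads start with signals blocked */

	while (!signal_pending(current))
		schedule_timeout_interruptible(HZ);	/* do work, doze */

	return 0;
}
/* started elsewhere with: kthread_run(example_thread, NULL, "example"); */
#endif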
345 
346 int disallow_signal(int sig)
347 {
348 	if (!valid_signal(sig) || sig < 1)
349 		return -EINVAL;
350 
351 	spin_lock_irq(&current->sighand->siglock);
352 	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
353 	recalc_sigpending();
354 	spin_unlock_irq(&current->sighand->siglock);
355 	return 0;
356 }
357 
358 EXPORT_SYMBOL(disallow_signal);
359 
360 /*
361  *	Put all the gunge required to become a kernel thread without
362  *	attached user resources in one place where it belongs.
363  */
364 
365 void daemonize(const char *name, ...)
366 {
367 	va_list args;
368 	struct fs_struct *fs;
369 	sigset_t blocked;
370 
371 	va_start(args, name);
372 	vsnprintf(current->comm, sizeof(current->comm), name, args);
373 	va_end(args);
374 
375 	/*
376 	 * If we were started as a result of loading a module, close all of the
377 	 * user space pages.  We don't need them, and if we didn't close them
378 	 * they would be locked into memory.
379 	 */
380 	exit_mm(current);
381 	/*
382 	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
383 	 * or suspend transition begins right now.
384 	 */
385 	current->flags |= PF_NOFREEZE;
386 
387 	if (current->nsproxy != &init_nsproxy) {
388 		get_nsproxy(&init_nsproxy);
389 		switch_task_namespaces(current, &init_nsproxy);
390 	}
391 	set_special_pids(&init_struct_pid);
392 	proc_clear_tty(current);
393 
394 	/* Block and flush all signals */
395 	sigfillset(&blocked);
396 	sigprocmask(SIG_BLOCK, &blocked, NULL);
397 	flush_signals(current);
398 
399 	/* Become as one with the init task */
400 
401 	exit_fs(current);	/* current->fs->count--; */
402 	fs = init_task.fs;
403 	current->fs = fs;
404 	atomic_inc(&fs->count);
405 
406 	exit_files(current);
407 	current->files = init_task.files;
408 	atomic_inc(&current->files->count);
409 
410 	reparent_to_kthreadd();
411 }
412 
413 EXPORT_SYMBOL(daemonize);
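
/*
 * Illustrative sketch of daemonize() usage (not compiled here;
 * example_daemon and do_example_work() are hypothetical): the classic
 * pattern for threads started with kernel_thread().
 */
#if 0
static int example_daemon(void *unused)
{
	daemonize("exampled");	/* shed mm, files and fs; block signals */
	allow_signal(SIGTERM);	/* re-enable the one signal we service */

	while (!signal_pending(current))
		do_example_work();	/* hypothetical work function */

	return 0;
}
/* launched elsewhere with: kernel_thread(example_daemon, NULL, 0); */
#endif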
414 
415 static void close_files(struct files_struct * files)
416 {
417 	int i, j;
418 	struct fdtable *fdt;
419 
420 	j = 0;
421 
422 	/*
423 	 * It is safe to dereference the fd table without RCU or
424 	 * ->file_lock because this is the last reference to the
425 	 * files structure.
426 	 */
427 	fdt = files_fdtable(files);
428 	for (;;) {
429 		unsigned long set;
430 		i = j * __NFDBITS;
431 		if (i >= fdt->max_fds)
432 			break;
433 		set = fdt->open_fds->fds_bits[j++];
434 		while (set) {
435 			if (set & 1) {
436 				struct file * file = xchg(&fdt->fd[i], NULL);
437 				if (file) {
438 					filp_close(file, files);
439 					cond_resched();
440 				}
441 			}
442 			i++;
443 			set >>= 1;
444 		}
445 	}
446 }
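
/*
 * Worked example of the bitmap walk above: with max_fds == 64 and only
 * fds 0 and 3 open, open_fds->fds_bits[0] == 0x9 (binary 1001).  The
 * inner loop closes fd 0, shifts past two clear bits, closes fd 3, and
 * the outer loop then stops once i reaches max_fds.
 */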
447 
448 struct files_struct *get_files_struct(struct task_struct *task)
449 {
450 	struct files_struct *files;
451 
452 	task_lock(task);
453 	files = task->files;
454 	if (files)
455 		atomic_inc(&files->count);
456 	task_unlock(task);
457 
458 	return files;
459 }
460 
461 void put_files_struct(struct files_struct *files)
462 {
463 	struct fdtable *fdt;
464 
465 	if (atomic_dec_and_test(&files->count)) {
466 		close_files(files);
467 		/*
468 		 * Free the fd and fdset arrays if we expanded them.
469 		 * If the fdtable was embedded, pass files for freeing
470 		 * at the end of the RCU grace period. Otherwise,
471 		 * the files structure can be freed immediately.
472 		 */
473 		fdt = files_fdtable(files);
474 		if (fdt != &files->fdtab)
475 			kmem_cache_free(files_cachep, files);
476 		free_fdtable(fdt);
477 	}
478 }
479 
480 EXPORT_SYMBOL(put_files_struct);
481 
482 void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
483 {
484 	struct files_struct *old;
485 
486 	old = tsk->files;
487 	task_lock(tsk);
488 	tsk->files = files;
489 	task_unlock(tsk);
490 	put_files_struct(old);
491 }
492 EXPORT_SYMBOL(reset_files_struct);
493 
494 static void __exit_files(struct task_struct *tsk)
495 {
496 	struct files_struct * files = tsk->files;
497 
498 	if (files) {
499 		task_lock(tsk);
500 		tsk->files = NULL;
501 		task_unlock(tsk);
502 		put_files_struct(files);
503 	}
504 }
505 
506 void exit_files(struct task_struct *tsk)
507 {
508 	__exit_files(tsk);
509 }
510 
511 static void __put_fs_struct(struct fs_struct *fs)
512 {
513 	/* No need to hold fs->lock if we are killing it */
514 	if (atomic_dec_and_test(&fs->count)) {
515 		dput(fs->root);
516 		mntput(fs->rootmnt);
517 		dput(fs->pwd);
518 		mntput(fs->pwdmnt);
519 		if (fs->altroot) {
520 			dput(fs->altroot);
521 			mntput(fs->altrootmnt);
522 		}
523 		kmem_cache_free(fs_cachep, fs);
524 	}
525 }
526 
527 void put_fs_struct(struct fs_struct *fs)
528 {
529 	__put_fs_struct(fs);
530 }
531 
532 static void __exit_fs(struct task_struct *tsk)
533 {
534 	struct fs_struct * fs = tsk->fs;
535 
536 	if (fs) {
537 		task_lock(tsk);
538 		tsk->fs = NULL;
539 		task_unlock(tsk);
540 		__put_fs_struct(fs);
541 	}
542 }
543 
544 void exit_fs(struct task_struct *tsk)
545 {
546 	__exit_fs(tsk);
547 }
548 
549 EXPORT_SYMBOL_GPL(exit_fs);
550 
551 /*
552  * Turn us into a lazy TLB process if we
553  * aren't already one.
554  */
555 static void exit_mm(struct task_struct * tsk)
556 {
557 	struct mm_struct *mm = tsk->mm;
558 
559 	mm_release(tsk, mm);
560 	if (!mm)
561 		return;
562 	/*
563 	 * Serialize with any possible pending coredump.
564 	 * We must hold mmap_sem around checking core_waiters
565 	 * and clearing tsk->mm.  The core-inducing thread
566 	 * will increment core_waiters for each thread in the
567 	 * group with ->mm != NULL.
568 	 */
569 	down_read(&mm->mmap_sem);
570 	if (mm->core_waiters) {
571 		up_read(&mm->mmap_sem);
572 		down_write(&mm->mmap_sem);
573 		if (!--mm->core_waiters)
574 			complete(mm->core_startup_done);
575 		up_write(&mm->mmap_sem);
576 
577 		wait_for_completion(&mm->core_done);
578 		down_read(&mm->mmap_sem);
579 	}
580 	atomic_inc(&mm->mm_count);
581 	BUG_ON(mm != tsk->active_mm);
582 	/* more a memory barrier than a real lock */
583 	task_lock(tsk);
584 	tsk->mm = NULL;
585 	up_read(&mm->mmap_sem);
586 	enter_lazy_tlb(mm, current);
587 	/* We don't want this task to be frozen prematurely */
588 	clear_freeze_flag(tsk);
589 	task_unlock(tsk);
590 	mmput(mm);
591 }
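
/*
 * Two counters are in play above: mm_users (real users of the address
 * space, dropped by the final mmput()) and mm_count (lazy-TLB style
 * references; the atomic_inc() keeps tsk->active_mm valid until the
 * final context switch away from this task drops it via mmdrop()).
 */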
592 
593 static void
594 reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
595 {
596 	if (p->pdeath_signal)
597 		/* We already hold the tasklist_lock here.  */
598 		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
599 
600 	/* Move the child from its dying parent to the new one.  */
601 	if (unlikely(traced)) {
602 		/* Preserve ptrace links if someone else is tracing this child.  */
603 		list_del_init(&p->ptrace_list);
604 		if (p->parent != p->real_parent)
605 			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
606 	} else {
607 		/* If this child is being traced, then we're the one tracing it
608 		 * anyway, so let go of it.
609 		 */
610 		p->ptrace = 0;
611 		remove_parent(p);
612 		p->parent = p->real_parent;
613 		add_parent(p);
614 
615 		if (task_is_traced(p)) {
616 			/*
617 			 * If it was at a trace stop, turn it into
618 			 * a normal stop since it's no longer being
619 			 * traced.
620 			 */
621 			ptrace_untrace(p);
622 		}
623 	}
624 
625 	/* If this is a threaded reparent there is no need to
626 	 * notify anyone that anything has happened.
627 	 */
628 	if (p->real_parent->group_leader == father->group_leader)
629 		return;
630 
631 	/* We don't want people slaying init.  */
632 	if (p->exit_signal != -1)
633 		p->exit_signal = SIGCHLD;
634 
635 	/* If we'd notified the old parent about this child's death,
636 	 * also notify the new parent.
637 	 */
638 	if (!traced && p->exit_state == EXIT_ZOMBIE &&
639 	    p->exit_signal != -1 && thread_group_empty(p))
640 		do_notify_parent(p, p->exit_signal);
641 
642 	/*
643 	 * process group orphan check
644 	 * Case ii: Our child is in a different pgrp
645 	 * than we are, and it was the only connection
646 	 * outside, so the child pgrp is now orphaned.
647 	 */
648 	if ((task_pgrp(p) != task_pgrp(father)) &&
649 	    (task_session(p) == task_session(father))) {
650 		struct pid *pgrp = task_pgrp(p);
651 
652 		if (will_become_orphaned_pgrp(pgrp, NULL) &&
653 		    has_stopped_jobs(pgrp)) {
654 			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
655 			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
656 		}
657 	}
658 }
659 
660 /*
661  * When we die, we re-parent all our children.
662  * Try to give them to another thread in our thread
663  * group, and if no such member exists, give it to
664  * the child reaper process (ie "init") in our pid
665  * space.
666  */
667 static void forget_original_parent(struct task_struct *father)
668 {
669 	struct task_struct *p, *n, *reaper = father;
670 	struct list_head ptrace_dead;
671 
672 	INIT_LIST_HEAD(&ptrace_dead);
673 
674 	write_lock_irq(&tasklist_lock);
675 
676 	do {
677 		reaper = next_thread(reaper);
678 		if (reaper == father) {
679 			reaper = task_child_reaper(father);
680 			break;
681 		}
682 	} while (reaper->flags & PF_EXITING);
683 
684 	/*
685 	 * There are only two places where our children can be:
686 	 *
687 	 * - in our child list
688 	 * - in our ptraced child list
689 	 *
690 	 * Search them and reparent children.
691 	 */
692 	list_for_each_entry_safe(p, n, &father->children, sibling) {
693 		int ptrace;
694 
695 		ptrace = p->ptrace;
696 
697 		/* if father isn't the real parent, then ptrace must be enabled */
698 		BUG_ON(father != p->real_parent && !ptrace);
699 
700 		if (father == p->real_parent) {
701 			/* reparent to the reaper; the real parent is us */
702 			p->real_parent = reaper;
703 			reparent_thread(p, father, 0);
704 		} else {
705 			__ptrace_unlink(p);
706 			__ptrace_unlink (p);
707 			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
708 			    thread_group_empty(p))
709 				do_notify_parent(p, p->exit_signal);
710 		}
711 
712 		/*
713 		 * If the ptraced child is a zombie with exit_signal == -1,
714 		 * we must collect it before we exit, or it will remain a
715 		 * zombie forever, since we prevented it from reaping itself
716 		 * while we were tracing it (so that we could see it in wait4).
717 		 */
718 		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
719 			list_add(&p->ptrace_list, &ptrace_dead);
720 	}
721 
722 	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
723 		p->real_parent = reaper;
724 		reparent_thread(p, father, 1);
725 	}
726 
727 	write_unlock_irq(&tasklist_lock);
728 	BUG_ON(!list_empty(&father->children));
729 	BUG_ON(!list_empty(&father->ptrace_children));
730 
731 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
732 		list_del_init(&p->ptrace_list);
733 		release_task(p);
734 	}
735 
737 
738 /*
739  * Send signals to all our closest relatives so that they know
740  * to properly mourn us..
741  */
742 static void exit_notify(struct task_struct *tsk)
743 {
744 	int state;
745 	struct task_struct *t;
746 	struct pid *pgrp;
747 
748 	/*
749 	 * This does two things:
750 	 *
751 	 * A.  Make init inherit all the child processes
752 	 * B.  Check to see if any process groups have become orphaned
753 	 *	as a result of our exiting, and if they have any stopped
754 	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
755 	 */
756 	forget_original_parent(tsk);
757 	exit_task_namespaces(tsk);
758 
759 	write_lock_irq(&tasklist_lock);
760 	/*
761 	 * Check to see if any process groups have become orphaned
762 	 * as a result of our exiting, and if they have any stopped
763 	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
764 	 *
765 	 * Case i: Our father is in a different pgrp than we are
766 	 * and we were the only connection outside, so our pgrp
767 	 * is about to become orphaned.
768 	 */
769 	t = tsk->real_parent;
770 
771 	pgrp = task_pgrp(tsk);
772 	if ((task_pgrp(t) != pgrp) &&
773 	    (task_session(t) == task_session(tsk)) &&
774 	    will_become_orphaned_pgrp(pgrp, tsk) &&
775 	    has_stopped_jobs(pgrp)) {
776 		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
777 		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
778 	}
779 
780 	/* Let father know we died
781 	 *
782 	 * Thread signals are configurable, but you aren't going to use
783 	 * that to send signals to arbitrary processes.
784 	 * That stops right now.
785 	 *
786 	 * If the parent exec id doesn't match the exec id we saved
787 	 * when we started then we know the parent has changed security
788 	 * domain.
789 	 *
790 	 * If our self_exec id doesn't match our parent_exec_id then
791 	 * we have changed execution domain as these two values started
792 	 * the same after a fork.
793 	 */
794 	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
795 	    (tsk->parent_exec_id != t->self_exec_id ||
796 	     tsk->self_exec_id != tsk->parent_exec_id) &&
797 	    !capable(CAP_KILL))
798 		tsk->exit_signal = SIGCHLD;
799 
801 	/* If something other than our normal parent is ptracing us, then
802 	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
803 	 * only has special meaning to our real parent.
804 	 */
805 	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
806 		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
807 		do_notify_parent(tsk, signal);
808 	} else if (tsk->ptrace) {
809 		do_notify_parent(tsk, SIGCHLD);
810 	}
811 
812 	state = EXIT_ZOMBIE;
813 	if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
814 		state = EXIT_DEAD;
815 	tsk->exit_state = state;
816 
817 	if (thread_group_leader(tsk) &&
818 	    tsk->signal->notify_count < 0 &&
819 	    tsk->signal->group_exit_task)
820 		wake_up_process(tsk->signal->group_exit_task);
821 
822 	write_unlock_irq(&tasklist_lock);
823 
824 	/* If the process is dead, release it - nobody will wait for it */
825 	if (state == EXIT_DEAD)
826 		release_task(tsk);
827 }
828 
829 #ifdef CONFIG_DEBUG_STACK_USAGE
830 static void check_stack_usage(void)
831 {
832 	static DEFINE_SPINLOCK(low_water_lock);
833 	static int lowest_to_date = THREAD_SIZE;
834 	unsigned long *n = end_of_stack(current);
835 	unsigned long free;
836 
837 	while (*n == 0)
838 		n++;
839 	free = (unsigned long)n - (unsigned long)end_of_stack(current);
840 
841 	if (free >= lowest_to_date)
842 		return;
843 
844 	spin_lock(&low_water_lock);
845 	if (free < lowest_to_date) {
846 		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
847 				"left\n",
848 				current->comm, free);
849 		lowest_to_date = free;
850 	}
851 	spin_unlock(&low_water_lock);
852 }
853 #else
854 static inline void check_stack_usage(void) {}
855 #endif
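
/*
 * How the watermark scan above works: with CONFIG_DEBUG_STACK_USAGE the
 * thread stack is zeroed at allocation (on architectures supporting the
 * option), so the first non-zero word above end_of_stack() marks the
 * deepest extent the stack has ever reached; "free" is the untouched gap.
 */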
856 
857 static inline void exit_child_reaper(struct task_struct *tsk)
858 {
859 	if (likely(tsk->group_leader != task_child_reaper(tsk)))
860 		return;
861 
862 	if (tsk->nsproxy->pid_ns == &init_pid_ns)
863 		panic("Attempted to kill init!");
864 
865 	/*
866 	 * @tsk is the last thread in the 'cgroup-init' and is exiting.
867 	 * Terminate all remaining processes in the namespace and reap them
868 	 * before exiting @tsk.
869 	 *
870 	 * Note that @tsk (last thread of cgroup-init) may not necessarily
871 	 * be the child-reaper (i.e. the main thread of cgroup-init) of the
872 	 * namespace, i.e. the child_reaper may have already exited.
873 	 *
874 	 * Even after a child_reaper exits, we let it inherit orphaned children,
875 	 * because pid_ns->child_reaper remains valid as long as there is
876 	 * at least one living sub-thread in the cgroup init.
877 	 *
878 	 * This living sub-thread of the cgroup-init will be notified when
879 	 * a child inherited by the 'child-reaper' exits (do_notify_parent()
880 	 * uses __group_send_sig_info()). Further, when reaping child processes,
881 	 * do_wait() iterates over the children of all living sub-threads.
882 	 *
883 	 * I.e. even though the 'child_reaper' thread is listed as the parent of
884 	 * the orphaned children, any living sub-thread in the cgroup-init can
885 	 * perform the role of the child_reaper.
886 	 */
887 	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
888 }
889 
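/*
 * do_exit - the final teardown of the current task.  Releases every
 * resource the task owns, notifies the parent, and schedules away for
 * the last time; it never returns.
 */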
890 NORET_TYPE void do_exit(long code)
891 {
892 	struct task_struct *tsk = current;
893 	int group_dead;
894 
895 	profile_task_exit(tsk);
896 
897 	WARN_ON(atomic_read(&tsk->fs_excl));
898 
899 	if (unlikely(in_interrupt()))
900 		panic("Aiee, killing interrupt handler!");
901 	if (unlikely(!tsk->pid))
902 		panic("Attempted to kill the idle task!");
903 
904 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
905 		current->ptrace_message = code;
906 		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
907 	}
908 
909 	/*
910 	 * We're taking recursive faults here in do_exit. Safest is to just
911 	 * leave this task alone and wait for reboot.
912 	 */
913 	if (unlikely(tsk->flags & PF_EXITING)) {
914 		printk(KERN_ALERT
915 			"Fixing recursive fault but reboot is needed!\n");
916 		/*
917 		 * We can do this unlocked here. The futex code uses
918 		 * this flag just to verify whether the pi state
919 		 * cleanup has been done or not. In the worst case it
920 		 * loops once more. We pretend that the cleanup was
921 		 * done as there is no way to return. Either the
922 		 * OWNER_DIED bit is set by now or we push the blocked
923 		 * task into wait-forever nirvana as well.
924 		 */
925 		tsk->flags |= PF_EXITPIDONE;
926 		if (tsk->io_context)
927 			exit_io_context();
928 		set_current_state(TASK_UNINTERRUPTIBLE);
929 		schedule();
930 	}
931 
932 	exit_signals(tsk);  /* sets PF_EXITING */
933 	/*
934 	 * tsk->flags are checked in the futex code to protect against
935 	 * an exiting task cleaning up the robust pi futexes.
936 	 */
937 	smp_mb();
938 	spin_unlock_wait(&tsk->pi_lock);
939 
940 	if (unlikely(in_atomic()))
941 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
942 				current->comm, task_pid_nr(current),
943 				preempt_count());
944 
945 	acct_update_integrals(tsk);
946 	if (tsk->mm) {
947 		update_hiwater_rss(tsk->mm);
948 		update_hiwater_vm(tsk->mm);
949 	}
950 	group_dead = atomic_dec_and_test(&tsk->signal->live);
951 	if (group_dead) {
952 		exit_child_reaper(tsk);
953 		hrtimer_cancel(&tsk->signal->real_timer);
954 		exit_itimers(tsk->signal);
955 	}
956 	acct_collect(code, group_dead);
957 #ifdef CONFIG_FUTEX
958 	if (unlikely(tsk->robust_list))
959 		exit_robust_list(tsk);
960 #ifdef CONFIG_COMPAT
961 	if (unlikely(tsk->compat_robust_list))
962 		compat_exit_robust_list(tsk);
963 #endif
964 #endif
965 	if (group_dead)
966 		tty_audit_exit();
967 	if (unlikely(tsk->audit_context))
968 		audit_free(tsk);
969 
970 	tsk->exit_code = code;
971 	taskstats_exit(tsk, group_dead);
972 
973 	exit_mm(tsk);
974 
975 	if (group_dead)
976 		acct_process();
977 	exit_sem(tsk);
978 	__exit_files(tsk);
979 	__exit_fs(tsk);
980 	check_stack_usage();
981 	exit_thread();
982 	cgroup_exit(tsk, 1);
983 	exit_keys(tsk);
984 
985 	if (group_dead && tsk->signal->leader)
986 		disassociate_ctty(1);
987 
988 	module_put(task_thread_info(tsk)->exec_domain->module);
989 	if (tsk->binfmt)
990 		module_put(tsk->binfmt->module);
991 
992 	proc_exit_connector(tsk);
993 	exit_notify(tsk);
994 #ifdef CONFIG_NUMA
995 	mpol_free(tsk->mempolicy);
996 	tsk->mempolicy = NULL;
997 #endif
998 #ifdef CONFIG_FUTEX
999 	/*
1000 	 * This must happen late, after the PID is not
1001 	 * hashed anymore:
1002 	 */
1003 	if (unlikely(!list_empty(&tsk->pi_state_list)))
1004 		exit_pi_state_list(tsk);
1005 	if (unlikely(current->pi_state_cache))
1006 		kfree(current->pi_state_cache);
1007 #endif
1008 	/*
1009 	 * Make sure we are holding no locks:
1010 	 */
1011 	debug_check_no_locks_held(tsk);
1012 	/*
1013 	 * We can do this unlocked here. The futex code uses this flag
1014 	 * just to verify whether the pi state cleanup has been done
1015 	 * or not. In the worst case it loops once more.
1016 	 */
1017 	tsk->flags |= PF_EXITPIDONE;
1018 
1019 	if (tsk->io_context)
1020 		exit_io_context();
1021 
1022 	if (tsk->splice_pipe)
1023 		__free_pipe_info(tsk->splice_pipe);
1024 
1025 	preempt_disable();
1026 	/* causes final put_task_struct in finish_task_switch(). */
1027 	tsk->state = TASK_DEAD;
1028 
1029 	schedule();
1030 	BUG();
1031 	/* Avoid "noreturn function does return".  */
1032 	for (;;)
1033 		cpu_relax();	/* For when BUG is null */
1034 }
1035 
1036 EXPORT_SYMBOL_GPL(do_exit);
1037 
1038 NORET_TYPE void complete_and_exit(struct completion *comp, long code)
1039 {
1040 	if (comp)
1041 		complete(comp);
1042 
1043 	do_exit(code);
1044 }
1045 
1046 EXPORT_SYMBOL(complete_and_exit);
1047 
1048 asmlinkage long sys_exit(int error_code)
1049 {
1050 	do_exit((error_code & 0xff) << 8);
1051 }
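
/*
 * Worked example of the status encoding above: sys_exit(3) stores
 * (3 & 0xff) << 8 == 0x0300, which user space decodes as
 * WIFEXITED(0x0300) != 0 and WEXITSTATUS(0x0300) == 3.
 */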
1052 
1053 /*
1054  * Take down every thread in the group.  This is called by fatal signals
1055  * as well as by sys_exit_group (below).
1056  */
1057 NORET_TYPE void
1058 do_group_exit(int exit_code)
1059 {
1060 	BUG_ON(exit_code & 0x80); /* core dumps don't get here */
1061 
1062 	if (current->signal->flags & SIGNAL_GROUP_EXIT)
1063 		exit_code = current->signal->group_exit_code;
1064 	else if (!thread_group_empty(current)) {
1065 		struct signal_struct *const sig = current->signal;
1066 		struct sighand_struct *const sighand = current->sighand;
1067 		spin_lock_irq(&sighand->siglock);
1068 		if (signal_group_exit(sig))
1069 			/* Another thread got here before we took the lock.  */
1070 			exit_code = sig->group_exit_code;
1071 		else {
1072 			sig->group_exit_code = exit_code;
1073 			sig->flags = SIGNAL_GROUP_EXIT;
1074 			zap_other_threads(current);
1075 		}
1076 		spin_unlock_irq(&sighand->siglock);
1077 	}
1078 
1079 	do_exit(exit_code);
1080 	/* NOTREACHED */
1081 }
1082 
1083 /*
1084  * This kills every thread in the thread group. Note that any externally
1085  * wait4()-ing process will get the correct exit code - even if this
1086  * thread is not the thread group leader.
1087  */
1088 asmlinkage void sys_exit_group(int error_code)
1089 {
1090 	do_group_exit((error_code & 0xff) << 8);
1091 }
1092 
1093 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
1094 {
1095 	struct pid *pid = NULL;
1096 	if (type == PIDTYPE_PID)
1097 		pid = task->pids[type].pid;
1098 	else if (type < PIDTYPE_MAX)
1099 		pid = task->group_leader->pids[type].pid;
1100 	return pid;
1101 }
1102 
1103 static int eligible_child(enum pid_type type, struct pid *pid, int options,
1104 			  struct task_struct *p)
1105 {
1106 	int err;
1107 
1108 	if (type < PIDTYPE_MAX) {
1109 		if (task_pid_type(p, type) != pid)
1110 			return 0;
1111 	}
1112 
1113 	/*
1114 	 * Do not consider detached threads that are
1115 	 * not ptraced:
1116 	 */
1117 	if (p->exit_signal == -1 && !p->ptrace)
1118 		return 0;
1119 
1120 	/* Wait for all children (clone and not) if __WALL is set;
1121 	 * otherwise, wait for clone children *only* if __WCLONE is
1122 	 * set; otherwise, wait for non-clone children *only*.  (Note:
1123 	 * A "clone" child here is one that reports to its parent
1124 	 * using a signal other than SIGCHLD.) */
1125 	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
1126 	    && !(options & __WALL))
1127 		return 0;
1128 
1129 	err = security_task_wait(p);
1130 	if (likely(!err))
1131 		return 1;
1132 
1133 	if (type != PIDTYPE_PID)
1134 		return 0;
1135 	/* This child was explicitly requested, abort */
1136 	read_unlock(&tasklist_lock);
1137 	return err;
1138 }
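
/*
 * Worked example of the __WCLONE test above: a child with
 * exit_signal == SIGUSR1 is a "clone" child, so with options == 0 the
 * XOR evaluates to (1 ^ 0) and the child is skipped; passing __WCLONE
 * makes it (1 ^ 1) == 0 and the child becomes eligible.  __WALL matches
 * both kinds unconditionally.
 */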
1139 
1140 static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
1141 			       int why, int status,
1142 			       struct siginfo __user *infop,
1143 			       struct rusage __user *rusagep)
1144 {
1145 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
1146 
1147 	put_task_struct(p);
1148 	if (!retval)
1149 		retval = put_user(SIGCHLD, &infop->si_signo);
1150 	if (!retval)
1151 		retval = put_user(0, &infop->si_errno);
1152 	if (!retval)
1153 		retval = put_user((short)why, &infop->si_code);
1154 	if (!retval)
1155 		retval = put_user(pid, &infop->si_pid);
1156 	if (!retval)
1157 		retval = put_user(uid, &infop->si_uid);
1158 	if (!retval)
1159 		retval = put_user(status, &infop->si_status);
1160 	if (!retval)
1161 		retval = pid;
1162 	return retval;
1163 }
1164 
1165 /*
1166  * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
1167  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1168  * the lock and this task is uninteresting.  If we return nonzero, we have
1169  * released the lock and the system call should return.
1170  */
1171 static int wait_task_zombie(struct task_struct *p, int noreap,
1172 			    struct siginfo __user *infop,
1173 			    int __user *stat_addr, struct rusage __user *ru)
1174 {
1175 	unsigned long state;
1176 	int retval, status, traced;
1177 	pid_t pid = task_pid_vnr(p);
1178 
1179 	if (unlikely(noreap)) {
1180 		uid_t uid = p->uid;
1181 		int exit_code = p->exit_code;
1182 		int why, status;
1183 
1184 		get_task_struct(p);
1185 		read_unlock(&tasklist_lock);
1186 		if ((exit_code & 0x7f) == 0) {
1187 			why = CLD_EXITED;
1188 			status = exit_code >> 8;
1189 		} else {
1190 			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
1191 			status = exit_code & 0x7f;
1192 		}
1193 		return wait_noreap_copyout(p, pid, uid, why,
1194 					   status, infop, ru);
1195 	}
1196 
1197 	/*
1198 	 * Try to move the task's state to DEAD
1199 	 * only one thread is allowed to do this:
1200 	 */
1201 	state = xchg(&p->exit_state, EXIT_DEAD);
1202 	if (state != EXIT_ZOMBIE) {
1203 		BUG_ON(state != EXIT_DEAD);
1204 		return 0;
1205 	}
1206 
1207 	/* traced means p->ptrace, but not vice versa */
1208 	traced = (p->real_parent != p->parent);
1209 
1210 	if (likely(!traced)) {
1211 		struct signal_struct *psig;
1212 		struct signal_struct *sig;
1213 
1214 		/*
1215 		 * The resource counters for the group leader are in its
1216 		 * own task_struct.  Those for dead threads in the group
1217 		 * are in its signal_struct, as are those for the child
1218 		 * processes it has previously reaped.  All these
1219 		 * accumulate in the parent's signal_struct c* fields.
1220 		 *
1221 		 * We don't bother to take a lock here to protect these
1222 		 * p->signal fields, because they are only touched by
1223 		 * __exit_signal, which runs with tasklist_lock
1224 		 * write-locked anyway, and so is excluded here.  We do
1225 		 * need to protect the access to p->parent->signal fields,
1226 		 * as other threads in the parent group can be right
1227 		 * here reaping other children at the same time.
1228 		 */
1229 		spin_lock_irq(&p->parent->sighand->siglock);
1230 		psig = p->parent->signal;
1231 		sig = p->signal;
1232 		psig->cutime =
1233 			cputime_add(psig->cutime,
1234 			cputime_add(p->utime,
1235 			cputime_add(sig->utime,
1236 				    sig->cutime)));
1237 		psig->cstime =
1238 			cputime_add(psig->cstime,
1239 			cputime_add(p->stime,
1240 			cputime_add(sig->stime,
1241 				    sig->cstime)));
1242 		psig->cgtime =
1243 			cputime_add(psig->cgtime,
1244 			cputime_add(p->gtime,
1245 			cputime_add(sig->gtime,
1246 				    sig->cgtime)));
1247 		psig->cmin_flt +=
1248 			p->min_flt + sig->min_flt + sig->cmin_flt;
1249 		psig->cmaj_flt +=
1250 			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1251 		psig->cnvcsw +=
1252 			p->nvcsw + sig->nvcsw + sig->cnvcsw;
1253 		psig->cnivcsw +=
1254 			p->nivcsw + sig->nivcsw + sig->cnivcsw;
1255 		psig->cinblock +=
1256 			task_io_get_inblock(p) +
1257 			sig->inblock + sig->cinblock;
1258 		psig->coublock +=
1259 			task_io_get_oublock(p) +
1260 			sig->oublock + sig->coublock;
1261 		spin_unlock_irq(&p->parent->sighand->siglock);
1262 	}
1263 
1264 	/*
1265 	 * Now we are sure this task is interesting, and no other
1266 	 * thread can reap it because we set its state to EXIT_DEAD.
1267 	 */
1268 	read_unlock(&tasklist_lock);
1269 
1270 	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1271 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1272 		? p->signal->group_exit_code : p->exit_code;
1273 	if (!retval && stat_addr)
1274 		retval = put_user(status, stat_addr);
1275 	if (!retval && infop)
1276 		retval = put_user(SIGCHLD, &infop->si_signo);
1277 	if (!retval && infop)
1278 		retval = put_user(0, &infop->si_errno);
1279 	if (!retval && infop) {
1280 		int why;
1281 
1282 		if ((status & 0x7f) == 0) {
1283 			why = CLD_EXITED;
1284 			status >>= 8;
1285 		} else {
1286 			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1287 			status &= 0x7f;
1288 		}
1289 		retval = put_user((short)why, &infop->si_code);
1290 		if (!retval)
1291 			retval = put_user(status, &infop->si_status);
1292 	}
1293 	if (!retval && infop)
1294 		retval = put_user(pid, &infop->si_pid);
1295 	if (!retval && infop)
1296 		retval = put_user(p->uid, &infop->si_uid);
1297 	if (!retval)
1298 		retval = pid;
1299 
1300 	if (traced) {
1301 		write_lock_irq(&tasklist_lock);
1302 		/* We dropped tasklist_lock: the ptracer may have died and untraced p */
1303 		ptrace_unlink(p);
1304 		/*
1305 		 * If this is not a detached task, notify the parent.
1306 		 * If it's still not detached after that, don't release
1307 		 * it now.
1308 		 */
1309 		if (p->exit_signal != -1) {
1310 			do_notify_parent(p, p->exit_signal);
1311 			if (p->exit_signal != -1) {
1312 				p->exit_state = EXIT_ZOMBIE;
1313 				p = NULL;
1314 			}
1315 		}
1316 		write_unlock_irq(&tasklist_lock);
1317 	}
1318 	if (p != NULL)
1319 		release_task(p);
1320 
1321 	return retval;
1322 }
1323 
1324 /*
1325  * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
1326  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1327  * the lock and this task is uninteresting.  If we return nonzero, we have
1328  * released the lock and the system call should return.
1329  */
1330 static int wait_task_stopped(struct task_struct *p,
1331 			     int noreap, struct siginfo __user *infop,
1332 			     int __user *stat_addr, struct rusage __user *ru)
1333 {
1334 	int retval, exit_code, why;
1335 	uid_t uid = 0; /* unneeded, required by compiler */
1336 	pid_t pid;
1337 
1338 	exit_code = 0;
1339 	spin_lock_irq(&p->sighand->siglock);
1340 
1341 	if (unlikely(!task_is_stopped_or_traced(p)))
1342 		goto unlock_sig;
1343 
1344 	if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
1345 		/*
1346 		 * A group stop is in progress and this is the group leader.
1347 		 * We won't report until all threads have stopped.
1348 		 */
1349 		goto unlock_sig;
1350 
1351 	exit_code = p->exit_code;
1352 	if (!exit_code)
1353 		goto unlock_sig;
1354 
1355 	if (!noreap)
1356 		p->exit_code = 0;
1357 
1358 	uid = p->uid;
1359 unlock_sig:
1360 	spin_unlock_irq(&p->sighand->siglock);
1361 	if (!exit_code)
1362 		return 0;
1363 
1364 	/*
1365 	 * Now we are pretty sure this task is interesting.
1366 	 * Make sure it doesn't get reaped out from under us while we
1367 	 * give up the lock and then examine it below.  We don't want to
1368 	 * keep holding onto the tasklist_lock while we call getrusage and
1369 	 * possibly take page faults for user memory.
1370 	 */
1371 	get_task_struct(p);
1372 	pid = task_pid_vnr(p);
1373 	why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1374 	read_unlock(&tasklist_lock);
1375 
1376 	if (unlikely(noreap))
1377 		return wait_noreap_copyout(p, pid, uid,
1378 					   why, exit_code,
1379 					   infop, ru);
1380 
1381 	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1382 	if (!retval && stat_addr)
1383 		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
1384 	if (!retval && infop)
1385 		retval = put_user(SIGCHLD, &infop->si_signo);
1386 	if (!retval && infop)
1387 		retval = put_user(0, &infop->si_errno);
1388 	if (!retval && infop)
1389 		retval = put_user(why, &infop->si_code);
1390 	if (!retval && infop)
1391 		retval = put_user(exit_code, &infop->si_status);
1392 	if (!retval && infop)
1393 		retval = put_user(pid, &infop->si_pid);
1394 	if (!retval && infop)
1395 		retval = put_user(uid, &infop->si_uid);
1396 	if (!retval)
1397 		retval = pid;
1398 	put_task_struct(p);
1399 
1400 	BUG_ON(!retval);
1401 	return retval;
1402 }
1403 
1404 /*
1405  * Handle do_wait work for one task in a live, non-stopped state.
1406  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
1407  * the lock and this task is uninteresting.  If we return nonzero, we have
1408  * released the lock and the system call should return.
1409  */
1410 static int wait_task_continued(struct task_struct *p, int noreap,
1411 			       struct siginfo __user *infop,
1412 			       int __user *stat_addr, struct rusage __user *ru)
1413 {
1414 	int retval;
1415 	pid_t pid;
1416 	uid_t uid;
1417 
1418 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1419 		return 0;
1420 
1421 	spin_lock_irq(&p->sighand->siglock);
1422 	/* Re-check with the lock held.  */
1423 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1424 		spin_unlock_irq(&p->sighand->siglock);
1425 		return 0;
1426 	}
1427 	if (!noreap)
1428 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1429 	spin_unlock_irq(&p->sighand->siglock);
1430 
1431 	pid = task_pid_vnr(p);
1432 	uid = p->uid;
1433 	get_task_struct(p);
1434 	read_unlock(&tasklist_lock);
1435 
1436 	if (!infop) {
1437 		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1438 		put_task_struct(p);
1439 		if (!retval && stat_addr)
1440 			retval = put_user(0xffff, stat_addr);
1441 		if (!retval)
1442 			retval = pid;
1443 	} else {
1444 		retval = wait_noreap_copyout(p, pid, uid,
1445 					     CLD_CONTINUED, SIGCONT,
1446 					     infop, ru);
1447 		BUG_ON(retval == 0);
1448 	}
1449 
1450 	return retval;
1451 }
1452 
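/*
 * Common guts of sys_waitid() and sys_wait4(): scan our children (and
 * ptraced children) for one matching @type/@pid and @options, blocking
 * in TASK_INTERRUPTIBLE until a match appears unless WNOHANG was given.
 */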
1453 static long do_wait(enum pid_type type, struct pid *pid, int options,
1454 		    struct siginfo __user *infop, int __user *stat_addr,
1455 		    struct rusage __user *ru)
1456 {
1457 	DECLARE_WAITQUEUE(wait, current);
1458 	struct task_struct *tsk;
1459 	int flag, retval;
1460 
1461 	add_wait_queue(&current->signal->wait_chldexit, &wait);
1462 repeat:
1463 	/* If there is nothing that can match our criteria, just get out */
1464 	retval = -ECHILD;
1465 	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
1466 		goto end;
1467 
1468 	/*
1469 	 * We will set this flag if we see any child that might later
1470 	 * match our criteria, even if we are not able to reap it yet.
1471 	 */
1472 	flag = retval = 0;
1473 	current->state = TASK_INTERRUPTIBLE;
1474 	read_lock(&tasklist_lock);
1475 	tsk = current;
1476 	do {
1477 		struct task_struct *p;
1478 
1479 		list_for_each_entry(p, &tsk->children, sibling) {
1480 			int ret = eligible_child(type, pid, options, p);
1481 			if (!ret)
1482 				continue;
1483 
1484 			if (unlikely(ret < 0)) {
1485 				retval = ret;
1486 			} else if (task_is_stopped_or_traced(p)) {
1487 				/*
1488 				 * It's stopped now, so it might later
1489 				 * continue, exit, or stop again.
1490 				 */
1491 				flag = 1;
1492 				if (!(p->ptrace & PT_PTRACED) &&
1493 				    !(options & WUNTRACED))
1494 					continue;
1495 
1496 				retval = wait_task_stopped(p,
1497 						(options & WNOWAIT), infop,
1498 						stat_addr, ru);
1499 			} else if (p->exit_state == EXIT_ZOMBIE &&
1500 					!delay_group_leader(p)) {
1501 				/*
1502 				 * We don't reap group leaders with subthreads.
1503 				 */
1504 				if (!likely(options & WEXITED))
1505 					continue;
1506 				retval = wait_task_zombie(p,
1507 						(options & WNOWAIT), infop,
1508 						stat_addr, ru);
1509 			} else if (p->exit_state != EXIT_DEAD) {
1510 				/*
1511 				 * It's running now, so it might later
1512 				 * exit, stop, or stop and then continue.
1513 				 */
1514 				flag = 1;
1515 				if (!unlikely(options & WCONTINUED))
1516 					continue;
1517 				retval = wait_task_continued(p,
1518 						(options & WNOWAIT), infop,
1519 						stat_addr, ru);
1520 			}
1521 			if (retval != 0) /* tasklist_lock released */
1522 				goto end;
1523 		}
1524 		if (!flag) {
1525 			list_for_each_entry(p, &tsk->ptrace_children,
1526 								ptrace_list) {
1527 				flag = eligible_child(type, pid, options, p);
1528 				if (!flag)
1529 					continue;
1530 				if (likely(flag > 0))
1531 					break;
1532 				retval = flag;
1533 				goto end;
1534 			}
1535 		}
1536 		if (options & __WNOTHREAD)
1537 			break;
1538 		tsk = next_thread(tsk);
1539 		BUG_ON(tsk->signal != current->signal);
1540 	} while (tsk != current);
1541 	read_unlock(&tasklist_lock);
1542 
1543 	if (flag) {
1544 		if (options & WNOHANG)
1545 			goto end;
1546 		retval = -ERESTARTSYS;
1547 		if (signal_pending(current))
1548 			goto end;
1549 		schedule();
1550 		goto repeat;
1551 	}
1552 	retval = -ECHILD;
1553 end:
1554 	current->state = TASK_RUNNING;
1555 	remove_wait_queue(&current->signal->wait_chldexit, &wait);
1556 	if (infop) {
1557 		if (retval > 0)
1558 			retval = 0;
1559 		else {
1560 			/*
1561 			 * For a WNOHANG return, clear out all the fields
1562 			 * we would set so the user can easily tell the
1563 			 * difference.
1564 			 */
1565 			if (!retval)
1566 				retval = put_user(0, &infop->si_signo);
1567 			if (!retval)
1568 				retval = put_user(0, &infop->si_errno);
1569 			if (!retval)
1570 				retval = put_user(0, &infop->si_code);
1571 			if (!retval)
1572 				retval = put_user(0, &infop->si_pid);
1573 			if (!retval)
1574 				retval = put_user(0, &infop->si_uid);
1575 			if (!retval)
1576 				retval = put_user(0, &infop->si_status);
1577 		}
1578 	}
1579 	return retval;
1580 }
1581 
1582 asmlinkage long sys_waitid(int which, pid_t upid,
1583 			   struct siginfo __user *infop, int options,
1584 			   struct rusage __user *ru)
1585 {
1586 	struct pid *pid = NULL;
1587 	enum pid_type type;
1588 	long ret;
1589 
1590 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
1591 		return -EINVAL;
1592 	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1593 		return -EINVAL;
1594 
1595 	switch (which) {
1596 	case P_ALL:
1597 		type = PIDTYPE_MAX;
1598 		break;
1599 	case P_PID:
1600 		type = PIDTYPE_PID;
1601 		if (upid <= 0)
1602 			return -EINVAL;
1603 		break;
1604 	case P_PGID:
1605 		type = PIDTYPE_PGID;
1606 		if (upid <= 0)
1607 			return -EINVAL;
1608 		break;
1609 	default:
1610 		return -EINVAL;
1611 	}
1612 
1613 	if (type < PIDTYPE_MAX)
1614 		pid = find_get_pid(upid);
1615 	ret = do_wait(type, pid, options, infop, NULL, ru);
1616 	put_pid(pid);
1617 
1618 	/* avoid REGPARM breakage on x86: */
1619 	prevent_tail_call(ret);
1620 	return ret;
1621 }
1622 
1623 asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
1624 			  int options, struct rusage __user *ru)
1625 {
1626 	struct pid *pid = NULL;
1627 	enum pid_type type;
1628 	long ret;
1629 
1630 	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1631 			__WNOTHREAD|__WCLONE|__WALL))
1632 		return -EINVAL;
1633 
1634 	if (upid == -1)
1635 		type = PIDTYPE_MAX;
1636 	else if (upid < 0) {
1637 		type = PIDTYPE_PGID;
1638 		pid = find_get_pid(-upid);
1639 	} else if (upid == 0) {
1640 		type = PIDTYPE_PGID;
1641 		pid = get_pid(task_pgrp(current));
1642 	} else /* upid > 0 */ {
1643 		type = PIDTYPE_PID;
1644 		pid = find_get_pid(upid);
1645 	}
1646 
1647 	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
1648 	put_pid(pid);
1649 
1650 	/* avoid REGPARM breakage on x86: */
1651 	prevent_tail_call(ret);
1652 	return ret;
1653 }
1654 
1655 #ifdef __ARCH_WANT_SYS_WAITPID
1656 
1657 /*
1658  * sys_waitpid() remains for compatibility. waitpid() should be
1659  * implemented by calling sys_wait4() from libc.a.
1660  */
1661 asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
1662 {
1663 	return sys_wait4(pid, stat_addr, options, NULL);
1664 }
1665 
1666 #endif
1667