/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = 0;

	read_lock(&tasklist_lock);

	p = find_task_by_pid_type(PIDTYPE_PGID, pgrp);
	if (p == NULL)
		p = find_task_by_pid(pgrp);
	if (p != NULL)
		sid = process_session(p);

	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (process_group(p->real_parent) != pgrp &&
		    process_session(p->real_parent) == process_session(p)) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task
 * of the pid space that the thread belongs to.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper(current);
	current->real_parent = child_reaper(current);
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know the signal will be handled,
		   so that it doesn't get converted to SIGKILL or
		   just silently dropped. */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
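
/*
 * A minimal usage sketch (illustration only, not built as part of this
 * file; the helper name and the 10 second window are assumptions): a
 * kernel thread can open a window during which SIGTERM is delivered,
 * then close it again.
 */
#if 0
static void wait_killable_window(void)
{
	allow_signal(SIGTERM);			/* unblock SIGTERM */
	schedule_timeout_interruptible(10 * HZ);/* wakes early on a signal */
	if (signal_pending(current))
		flush_signals(current);		/* consume the pending SIGTERM */
	disallow_signal(SIGTERM);		/* block it again */
}
#endif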

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);
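
/*
 * A usage sketch of the classic pre-kthread pattern (illustration only;
 * "my_thread" and its wakeup interval are assumptions): a thread spawned
 * with kernel_thread() sheds its user resources with daemonize() and then
 * selectively re-enables the one signal it wants to react to, since
 * daemonize() blocked them all.
 */
#if 0
static int my_thread(void *data)
{
	daemonize("my_thread");		/* drop mm, files, fs; reparent to init */
	allow_signal(SIGKILL);		/* let SIGKILL through again */

	while (!signal_pending(current))
		schedule_timeout_interruptible(HZ);	/* do periodic work */

	return 0;			/* SIGKILL received: terminate */
}
#endif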

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		call_rcu(&fdt->rcu, free_fdtable_rcu);
	}
}

EXPORT_SYMBOL(put_files_struct);
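
/*
 * A pairing sketch (illustration only; the fd-counting walk is an
 * assumption, not an existing helper): a reference taken with
 * get_files_struct() pins the table so another task cannot free it
 * mid-walk, and must be balanced by put_files_struct().
 */
#if 0
static int count_open_fds(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	struct fdtable *fdt;
	int i, count = 0;

	if (!files)
		return 0;		/* task is already past __exit_files() */
	rcu_read_lock();
	fdt = files_fdtable(files);
	for (i = 0; i < fdt->max_fds; i++)
		if (fcheck_files(files, i))
			count++;
	rcu_read_unlock();
	put_files_struct(files);	/* may free the table if last ref */
	return count;
}
#endif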

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}

static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (process_session(p) == process_session(father))) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with the reaper; the real father is us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from reaping itself
		 * while it was being traced by us (so that we could see it
		 * in wait4).
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (process_session(t) == process_session(tsk)) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 *
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id  ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper(tsk))) {
		if (tsk->nsproxy->pid_ns != &init_pid_ns)
			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
		else
			panic("Attempted to kill init!");
	}


	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
	exit_task_namespaces(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
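
/*
 * A usage sketch (illustration only; the names below are assumptions):
 * complete_and_exit() lets a kernel thread signal its creator and die in
 * one step, so a module unload path can wait until the thread is really
 * gone before the module text disappears.
 */
#if 0
static DECLARE_COMPLETION(thread_done);

static int worker(void *data)
{
	daemonize("worker");
	/* ... do work until told to stop ... */
	complete_and_exit(&thread_done, 0);	/* never returns */
}

static void stop_worker(void)
{
	/* ... ask the thread to stop ... */
	wait_for_completion(&thread_done);	/* safe to unload after this */
}
#endif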

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}
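
/*
 * A worked example of the status encoding used above (for illustration):
 * sys_exit_group(1) stores (1 & 0xff) << 8 == 0x0100, which userspace sees
 * as WIFEXITED(status) true with WEXITSTATUS(status) == 1.  A stopped child
 * is reported as (signal << 8) | 0x7f (see wait_task_stopped() below),
 * which is what WIFSTOPPED() tests for.
 */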

static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
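
/*
 * A worked example of the __WCLONE test above (for illustration): a plain
 * fork() child has exit_signal == SIGCHLD, so with no __WCLONE in options
 * the XOR is 0 ^ 0 == 0 and the child is eligible for a default wait4();
 * with __WCLONE set it becomes 0 ^ 1 == 1 and, absent __WALL, the child is
 * skipped.  The converse holds for "clone" children that report with a
 * signal other than SIGCHLD.
 */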

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock. We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning. Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * which it needs in order to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
						ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit,&wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif