xref: /linux/kernel/exit.c (revision 4bedea94545165364618d403d03b61d797acba0b)
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

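/*
 * Drop a task from the pid hashes and the tasklist.  The caller must
 * hold tasklist_lock for writing.
 */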
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}

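/*
 * Final release of a dead task: drop it from the pid hashes and the
 * tasklist, flush its /proc entry, and drop its task_struct reference.
 * If this was the last non-leader thread and the group leader is
 * already a self-reaping zombie, loop back and release the leader too.
 */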
void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	__exit_sighand(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/* We use this only for SMP init. */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found.  I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

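/*
 * Does the process group contain at least one stopped job?  Used for
 * the POSIX orphaned-process-group rule: a newly orphaned pgrp with
 * stopped jobs gets a SIGHUP followed by a SIGCONT.
 */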
static inline int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation.  */
		if (unlikely (p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static inline void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	REMOVE_LINKS(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	SET_LINKS(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

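/*
 * Move the current task into the given session and process group.
 * The caller must hold tasklist_lock for writing; set_special_pids()
 * below is the locking wrapper.
 */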
void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

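/*
 * Typical use (a sketch, not code from this file): a 2.6-era kernel
 * thread sheds its user-space baggage and then re-enables the one
 * signal it cares about.  The thread name and work function below are
 * placeholders, not real kernel symbols:
 *
 *	daemonize("kexampled");
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_example_work();
 */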
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	down(&tty_sem);
	current->signal->tty = NULL;
	up(&tty_sem);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

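/*
 * Close every file descriptor still installed in @files by walking the
 * open_fds bitmap one word (__NFDBITS bits) at a time.  Runs only when
 * the last reference to the files_struct is being dropped, so no
 * locking is needed.
 */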
static inline void close_files(struct files_struct * files)
{
	int i, j;

	j = 0;
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= files->max_fdset || i >= files->max_fds)
			break;
		set = files->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&files->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 */
		if (files->fd != &files->fd_array[0])
			free_fd_array(files->fd, files->max_fds);
		if (files->max_fdset > __FD_SETSIZE) {
			free_fdset(files->open_fds, files->max_fdset);
			free_fdset(files->close_on_exec, files->max_fdset);
		}
		kmem_cache_free(files_cachep, files);
	}
}

EXPORT_SYMBOL(put_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
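	/*
	 * Take an mm_count reference so the mm_struct outlives tsk->mm:
	 * the scheduler keeps using it as the lazy-TLB active_mm after we
	 * clear tsk->mm below, and drops that reference only after this
	 * task has scheduled away for the last time.
	 */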
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
	p->real_parent = reaper;
}

static inline void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, (void *) 0, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * Process group orphan check.
	 * Case ii: our child is in a different pgrp than we are,
	 * and it was the only connection outside, so the child
	 * pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, (void *)1, pgrp);
			__kill_pg_info(SIGCONT, (void *)1, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
 */
static inline void forget_original_parent(struct task_struct * father,
					  struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent to the chosen reaper; the real father is us */
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever: we prevented it from self-reaping while
		 * it was being traced by us, so that it stayed visible to
		 * wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
		__kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
	}

	/* Let father know we died.
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);

	/* PF_DEAD causes final put_task_struct after we schedule. */
	preempt_disable();
	tsk->flags |= PF_DEAD;
}

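/*
 * do_exit - the final stage of a dying task.  Releases everything the
 * task still owns (mm, semaphores, files, fs, namespace, keys, ...),
 * lets the parent know via exit_notify(), and then schedules away for
 * good; PF_DEAD makes that last schedule() drop the final reference.
 */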
fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	/*
	 * Make sure we don't try to process any timer firings
	 * while we are already exiting.
	 */
	tsk->it_virt_expires = cputime_zero;
	tsk->it_prof_expires = cputime_zero;
	tsk->it_sched_expires = 0;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	update_mem_hiwater(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead)
		acct_process(code);
	exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(tsk->thread_info->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif

	BUG_ON(!(current->flags & PF_DEAD));
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;) ;
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

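/*
 * The low byte of the user-supplied status is shifted into bits 8-15
 * of the exit code, which is where wait(2)'s WEXITSTATUS() looks for
 * it.
 */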
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}

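/*
 * Return the next task on the thread group's circular per-TGID pid
 * list, eventually wrapping back around the group.
 */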
task_t fastcall *next_thread(const task_t *p)
{
	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->flags = SIGNAL_GROUP_EXIT;
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group.  Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

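/*
 * eligible_child - does task p match this wait call?  Returns 0 (no),
 * 1 (yes), or 2 (yes, but it is a group leader whose thread group is
 * not yet empty, so it cannot be reaped until its siblings are gone).
 */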
static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}

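/*
 * Fill in the siginfo and rusage for a WNOWAIT-style wait that reports
 * on the child but leaves it reapable; consumes the task reference the
 * caller took.
 */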
static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to EXIT_DEAD;
	 * only one thread is allowed to do this.
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		p->parent->signal->cutime =
			cputime_add(p->parent->signal->cutime,
			cputime_add(p->utime,
			cputime_add(p->signal->utime,
				    p->signal->cutime)));
		p->parent->signal->cstime =
			cputime_add(p->parent->signal->cstime,
			cputime_add(p->stime,
			cputime_add(p->signal->stime,
				    p->signal->cstime)));
		p->parent->signal->cmin_flt +=
			p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
		p->parent->signal->cmaj_flt +=
			p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
		p->parent->signal->cnvcsw +=
			p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
		p->parent->signal->cnivcsw +=
			p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		/* The copyout failed; put the task back so it can be
		   reaped by a later wait.  TODO: is this safe?  */
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			/* Re-zombify so the real parent can reap it.
			   TODO: is this safe?  */
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
			     struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state > TASK_STOPPED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

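/*
 * Is this TASK_TRACED child stopped under *our* ptrace, so that its
 * trace stop is ours to report?
 */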
static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach: the attacher is waiting for the
	 * tasklist_lock, which it needs in order to switch the parent
	 * links, but it has already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

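/*
 * Core of sys_wait4() and sys_waitid(): scan the children of every
 * thread in our group for one that matches, reap or report it, and
 * otherwise sleep on signal->wait_chldexit (woken by do_notify_parent())
 * unless WNOHANG was given.
 */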
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
						ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

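/*
 * sys_waitid folds its selector into the legacy pid convention used by
 * do_wait() and eligible_child(): pid > 0 means exactly that pid,
 * pid == -1 means any child, and pid < -1 means any child in process
 * group -pid.
 */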
asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility.  waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif