xref: /linux/kernel/exit.c (revision 4f1933620f57145212cdbb1ac6ce099eeeb21c5a)
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit(void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}

void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is a zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/* This is used only for SMP init. */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
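
/*
 * A worked example of the definition above (illustrative commentary,
 * not from the original source): a shell S in session 100 runs a
 * pipeline whose processes form pgrp 42.  As long as some member of
 * pgrp 42 has a live parent outside the pgrp but inside session 100
 * (normally the shell itself), the group is not orphaned.  If the
 * shell exits, no member has such a parent any more, the group
 * becomes orphaned, and if it then contains stopped jobs it is sent
 * SIGHUP followed by SIGCONT.
 */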

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation.  */
		if (unlikely(p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * Task state such as the scheduling policy and priority may have been
 * inherited from a user process, so we reset it to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	REMOVE_LINKS(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	SET_LINKS(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL ||
			current->policy == SCHED_BATCH)
				&& (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
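
/*
 * A minimal usage sketch (not from the original source): a kernel
 * thread that has called daemonize() starts with all signals blocked,
 * so it re-enables only the ones it wants and polls for them itself.
 * The thread function and its name are hypothetical:
 *
 *	static int mydrv_thread(void *unused)
 *	{
 *		daemonize("mydrvd");
 *		allow_signal(SIGKILL);
 *
 *		while (!signal_pending(current))
 *			do_some_work();		// hypothetical helper
 *		return 0;
 *	}
 */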

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	down(&tty_sem);
	current->signal->tty = NULL;
	up(&tty_sem);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);
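
/*
 * Sketch of the usual creation pattern for such a thread in this era
 * of the kernel (illustrative; the thread function is the hypothetical
 * one from the allow_signal() example above):
 *
 *	pid = kernel_thread(mydrv_thread, NULL, CLONE_FS | CLONE_FILES);
 *
 * The new thread then calls daemonize() as its first action to shed
 * the user-space resources it inherited and to reparent itself to init.
 */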

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period.  Otherwise, the
		 * files structure can be freed immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt == &files->fdtab)
			fdt->free_files = files;
		else
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);
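
/*
 * Usage sketch (illustrative only): a caller that wants to look at
 * another task's file table takes a counted reference and must drop
 * it again with put_files_struct() when done.
 *
 *	struct files_struct *files = get_files_struct(task);
 *	if (files) {
 *		// ... inspect files_fdtable(files) as appropriate ...
 *		put_files_struct(files);
 *	}
 */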

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
	p->real_parent = reaper;
}

static void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init.  */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one.  */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child.  */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give them to
 * the global child reaper process (ie "init")
 */
static void forget_original_parent(struct task_struct * father,
					  struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper; the real father is us */
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from self-reaping
		 * while it was being traced by us (so that we could see it
		 * in wait4).
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started, then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec_id doesn't match our parent_exec_id, then
	 * we have changed execution domain, as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	/*
	 * Make sure we don't try to process any timer firings
	 * while we are already exiting.
	 */
	tsk->it_virt_expires = cputime_zero;
	tsk->it_prof_expires = cputime_zero;
	tsk->it_sched_expires = 0;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		acct_process(code);
	}
	exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
	 */
	mutex_debug_check_no_locks_held(tsk);

	/* PF_DEAD causes final put_task_struct after we schedule. */
	preempt_disable();
	BUG_ON(tsk->flags & PF_DEAD);
	tsk->flags |= PF_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;) ;
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
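
/*
 * Usage sketch (illustrative, not from this file): a module's kernel
 * thread can use complete_and_exit() so that module unload can wait
 * until the thread is really gone before the module text is freed.
 * The names below are hypothetical:
 *
 *	static DECLARE_COMPLETION(mydrv_exited);
 *
 *	static int mydrv_thread(void *unused)
 *	{
 *		// ... work loop ...
 *		complete_and_exit(&mydrv_exited, 0);	// never returns
 *	}
 *
 *	static void __exit mydrv_cleanup(void)
 *	{
 *		// ask the thread to stop (flag, signal, etc.), then:
 *		wait_for_completion(&mydrv_exited);
 *	}
 */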

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code & 0xff) << 8);
}
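
/*
 * Note on the encoding above (added commentary): the low byte of the
 * exit code is shifted into bits 8-15, which is the layout the
 * userspace wait status macros expect; e.g. exit(3) produces a status
 * word of 0x0300, for which WIFEXITED() is true and WEXITSTATUS()
 * returns 3.
 */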

task_t fastcall *next_thread(const task_t *p)
{
	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group.  Note that any externally
 * wait4()-ing process will get the correct exit code, even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
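
/*
 * Added commentary: the pid argument above follows the waitpid(2)
 * convention.  For example:
 *
 *	pid == 7	match only the child with pid 7
 *	pid == 0	match any child in the caller's process group
 *	pid == -1	match any child at all
 *	pid == -5	match any child in process group 5
 */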

static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to EXIT_DEAD;
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held.  */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
			     struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having
		 * successfully reaped the process, and we have already
		 * released the lock.  We cannot continue, since the "p"
		 * task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from
		 * the beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
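
/*
 * Added commentary: the (exit_code << 8) | 0x7f encoding above is the
 * userspace "stopped" wait status; 0x7f in the low byte makes
 * WIFSTOPPED() true, and WSTOPSIG() then recovers the stopping signal
 * from bits 8-15.  For a child stopped by SIGTSTP (20), the status
 * word is 0x147f.
 */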

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * which it needs in order to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock.  */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock.  */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
						ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
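
/*
 * Userspace sketch (illustrative): waitid() reports through a siginfo
 * rather than a packed status word, so no W* decoding macros are
 * needed.
 *
 *	siginfo_t info;
 *	if (waitid(P_ALL, 0, &info, WEXITED | WNOWAIT) == 0)
 *		printf("child %d: si_code=%d status=%d\n",
 *		       (int)info.si_pid, info.si_code, info.si_status);
 */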

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
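
/*
 * Userspace sketch (illustrative): the status word filled in by
 * sys_wait4() is decoded with the familiar macros from <sys/wait.h>.
 *
 *	int status;
 *	pid_t pid = wait4(-1, &status, WUNTRACED, NULL);
 *	if (pid > 0 && WIFEXITED(status))
 *		printf("%d exited with %d\n", pid, WEXITSTATUS(status));
 *	else if (pid > 0 && WIFSTOPPED(status))
 *		printf("%d stopped by signal %d\n", pid, WSTOPSIG(status));
 */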

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif