xref: /freebsd/sys/kern/kern_fork.c (revision e1fe3dba5ce2826061f6489765be9b4a341736a9)
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/mac.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>


#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int     dummy;
};
#endif

static int forksleep; /* Place for fork1() to sleep on. */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(struct thread *td, struct fork_args *uap)
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
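		/*
		 * Return the child's pid to the parent; the MD code
		 * (cpu_fork()/fork_trampoline()) arranges for the child
		 * itself to return 0.  Clearing td_retval[1] marks the
		 * parent side of the historical two-register fork
		 * return convention.
		 */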
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(struct thread *td, struct vfork_args *uap)
{
	int error;
	struct proc *p2;

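	/*
	 * RFMEM shares the parent's address space with the child and
	 * RFPPWAIT makes the parent sleep until the child execs or
	 * exits, giving vfork() its traditional semantics.
	 */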
	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
rfork(struct thread *td, struct rfork_args *uap)
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG(fflags, uap->flags);
	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
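		/*
		 * fork1() leaves p2 NULL when RFPROC was not requested,
		 * so report a pid of 0 to the caller in that case.
		 */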
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}

int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity check the modulus value
 * to avoid doing it in critical paths.  Don't let it be too small or we
 * pointlessly waste entropy, and don't let it be impossibly large.  Using
 * a modulus that is too big causes a LOT more process table scans and
 * slows down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

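	/*
	 * Wire the old buffer so that sysctl_handle_int() cannot fault
	 * on the userland buffer while we hold allproc_lock.
	 */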
	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
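
/*
 * Example: "sysctl kern.randompid=100" adds a random offset in
 * [0, 100) to each new pid; setting the value back to 0 disables
 * the randomization.
 */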

int
fork1(struct thread *td, int flags, int pages, struct proc **procp)
{
	struct proc *p1, *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			if (thread_single(SINGLE_BOUNDARY)) {
				PROC_UNLOCK(p1);
				return (ERESTART);
			}
			PROC_UNLOCK(p1);
		}

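		/*
		 * With RFPROC clear there is no child: vm_forkproc()
		 * just un-shares the current address space when RFMEM
		 * was not requested and lets the MD code unshare any
		 * per-process state.
		 */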
		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);

		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Note 1:1 allows for forking with one thread coming out on the
	 * other side with the expectation that the process is about to
	 * exec.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		/*
		 * Idle the other threads for a second.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to either be suspended or in the kernel, where
		 * they will try to restart in the parent and will be
		 * aborted in the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SINGLE_NO_EXIT)) {
			/* Abort. Someone else is single threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process is now suspended
		 * at the user boundary (or other safe places if we
		 * think of any).
		 */
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
#ifdef MAC
	mac_init_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_alloc(newproc);
#endif
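	/*
	 * Set up the knote list (protected by the proc mutex) and the
	 * per-process ktrace request queue before the new process
	 * becomes visible to anyone else.
	 */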
	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit. The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	uid = td->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 &&
	    suser_cred(td->td_ucred, SUSER_RUID) != 0) ||
	    nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	PROC_LOCK(p1);
	ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		(uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0);
	PROC_UNLOCK(p1);
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits on the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG(pid, p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_LOCK_FAST(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_UNLOCK_FAST(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
							 p1->p_fd,
							 p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = FIRST_THREAD_IN_PROC(p2);
	kg2 = FIRST_KSEGRP_IN_PROC(p2);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0)
		vm_thread_new_altkstack(td2, pages);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bzero(&kg2->kg_startzero,
	    __rangeof(struct ksegrp, kg_startzero, kg_endzero));

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));

	td2->td_sigstk = td->td_sigstk;
	td2->td_sigmask = td->td_sigmask;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = 0;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	/*
	 * Allow the scheduler to adjust the priority of the child and
	 * parent while we hold the sched_lock.
	 */
	sched_fork(td, td2);

	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */
#ifdef AUDIT
	audit_proc_fork(p1, p2);
#endif
	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	p2->p_limit = lim_hold(p1->p_limit);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs) */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, flags);

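	/*
	 * Classify this fork for the VM statistics: a plain fork(), a
	 * vfork(), a kernel thread (the parent is proc0), or anything
	 * else, which is counted as an rfork().
	 */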
	if (flags == (RFFDG | RFPROC)) {
		atomic_add_int(&cnt.v_forks, 1);
		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		atomic_add_int(&cnt.v_vforks, 1);
		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		atomic_add_int(&cnt.v_kthreads, 1);
		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		atomic_add_int(&cnt.v_rforks, 1);
		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	mtx_lock_spin(&sched_lock);
	p2->p_state = PRS_NORMAL;

	/*
	 * If RFSTOPPED was not requested, make the child runnable and
	 * add it to the run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		TD_SET_CAN_RUN(td2);
		setrunqueue(td2, SRQ_BORING);
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * The parent can now be swapped again.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);

	PROC_UNLOCK(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
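	/*
	 * The child clears P_PPWAIT and wakes us on the parent proc
	 * pointer from its exec()/exit() path; the child's proc lock is
	 * the msleep() interlock, making the flag test and the sleep
	 * atomic.
	 */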
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * If other threads are waiting, let them continue now.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
			uid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_destroy_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_free(newproc);
#endif
	uma_zfree(proc_zone, newproc);
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
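	/*
	 * Back off briefly so a process that is spinning on fork()
	 * failures does not hog the CPU while the process table is
	 * full.
	 */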
	tsleep(&forksleep, PUSER, "fork", hz / 2);
	return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td = curthread;
	p = td->td_proc;
	td->td_oncpu = PCPU_GET(cpuid);
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	sched_lock.mtx_lock = (uintptr_t)td;
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
		td, td->td_sched, p->p_pid, p->p_comm);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */

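	/*
	 * Reap a thread that exited on this CPU just before we switched
	 * in; a dying thread cannot free its own stack, so the previous
	 * thread is stashed for us to clean up.
	 */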
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler() intercepts this function call to have it
	 * call a non-returning function instead, keeping the thread in
	 * kernel mode.  initproc has its own fork handler, but that one
	 * does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KTHREAD) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(struct thread *td, struct trapframe *frame)
{

	userret(td, frame);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}
852