xref: /freebsd/sys/kern/kern_fork.c (revision 6af83ee0d2941d18880b6aaa2b4facd1d30c6106)
/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/mac.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/critical.h>

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int     dummy;
};
#endif

static int forksleep; /* Place for fork1() to sleep on. */

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(struct thread *td, struct fork_args *uap)
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
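
/*
 * The return values set above are the parent's: retval[0] carries the
 * child's pid and retval[1] is cleared.  The child never executes this
 * code; its return registers are initialized by the machine-dependent
 * fork path (cpu_fork()), so that it comes back through
 * fork_trampoline() and fork_return() and reports 0 to userland.
 */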

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(struct thread *td, struct vfork_args *uap)
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/*
 * MPSAFE
 */
int
rfork(struct thread *td, struct rfork_args *uap)
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}
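
/*
 * In terms of the rfork(2) interface above, fork() is equivalent to
 * rfork(RFFDG | RFPROC) and vfork() to
 * rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM): copy the parent's file
 * descriptor table and create a new process; vfork() additionally
 * shares the parent's address space and makes the parent wait until
 * the child execs or exits.
 */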

int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to
 * make it a little harder to predict.  We sanity-check the modulus value
 * here, in the sysctl handler, so that the check need not be made on the
 * critical fork path.  Don't let the modulus be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large: a modulus that is
 * too big causes many more process table scans and slows down fork
 * processing, as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
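
/*
 * For example, setting kern.randompid to 100 makes fork1() below pick
 * its candidate pid from the range [lastpid + 1, lastpid + 100] instead
 * of always using lastpid + 1, so consecutively created processes no
 * longer receive strictly sequential pids.
 */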

int
fork1(struct thread *td, int flags, int pages, struct proc **procp)
{
	struct proc *p1, *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);
		*procp = NULL;
		return (0);
	}
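
	/*
	 * For example, rfork(RFCFDG) with RFPROC clear takes the branch
	 * above: no child is created, the caller simply continues with a
	 * brand new file descriptor table, and *procp is set to NULL
	 * (which is why rfork() reports a pid of 0 in that case).
	 */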

	/*
	 * Note that the 1:1 threading model allows a process to fork with
	 * only one thread coming out on the other side, on the expectation
	 * that the process is about to exec.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		/*
		 * Idle the other threads for a moment.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to be either suspended or in the kernel, where they
		 * will try to restart in the parent and will be aborted in
		 * the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SINGLE_NO_EXIT)) {
			/* Abort. Someone else is single-threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process
		 * is now suspended at the user boundary
		 * (or other safe places if we think of any).
		 */
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
#ifdef MAC
	mac_init_proc(newproc);
#endif
	knlist_init(&newproc->p_klist, &newproc->p_mtx);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	uid = td->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 &&
	    suser_cred(td->td_ucred, SUSER_RUID) != 0) ||
	    nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	PROC_LOCK(p1);
	ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		(uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0);
	PROC_UNLOCK(p1);
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the nprocs count before blocking can occur.  There
	 * are hard limits on the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the candidate process ID has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
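
	/*
	 * On the way out of this block, trypid is known to be unused as a
	 * pid, process group ID, and session ID, and pidchecked holds the
	 * smallest in-use ID greater than trypid.  Every candidate in
	 * (trypid, pidchecked) is therefore also free, so subsequent forks
	 * can allocate from that range without rescanning the process
	 * lists.
	 */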
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_LOCK_FAST(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_UNLOCK_FAST(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
							 p1->p_fd,
							 p2);
		}
	}
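
	/*
	 * To summarize the three cases above: RFCFDG gives the child a
	 * brand new descriptor table, RFFDG gives it a copy of the
	 * parent's, and neither flag shares the parent's table, with
	 * fdtol recording which process leader the shared table belongs
	 * to (bookkeeping that allows shared POSIX advisory locks to be
	 * released correctly later).
	 */
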
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = FIRST_THREAD_IN_PROC(p2);
	kg2 = FIRST_KSEGRP_IN_PROC(p2);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0)
		vm_thread_new_altkstack(td2, pages);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	bzero(&kg2->kg_startzero,
	    __rangeof(struct ksegrp, kg_startzero, kg_endzero));

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
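
	/*
	 * The start/end identifiers above are marker fields within struct
	 * proc, struct thread, and struct ksegrp that bracket the members
	 * to be zeroed and the members to be copied from the parent;
	 * __rangeof() computes the byte span between two members, so a
	 * new field is handled automatically as long as it is declared
	 * between the appropriate pair of markers.
	 */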

	td2->td_sigstk = td->td_sigstk;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = 0;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	/*
	 * Allow the scheduler to adjust the priority of the child and
	 * parent while we hold the sched_lock.
	 */
	sched_fork(td, td2);

	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */

	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	p2->p_limit = lim_hold(p1->p_limit);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILLs to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}
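
	/*
	 * After the RFTHREAD case above, p2 has been spliced into the
	 * leader's singly-linked p_peers chain immediately after p1 and
	 * shares p1's p_leader; a process created without RFTHREAD is
	 * its own leader with an empty peers list.
	 */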

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in the subprocess.  P_PROFIL has
	 * already been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, flags);

	if (flags == (RFFDG | RFPROC)) {
		atomic_add_int(&cnt.v_forks, 1);
		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		atomic_add_int(&cnt.v_vforks, 1);
		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		atomic_add_int(&cnt.v_kthreads, 1);
		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		atomic_add_int(&cnt.v_rforks, 1);
		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}
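
	/*
	 * Note that the statistics selection above depends on exact flag
	 * matches: plain fork() and vfork() pass exactly the flag
	 * combinations tested for, kernel threads are forked from proc0,
	 * and any other combination is accounted as an rfork.
	 */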

	/*
	 * Both processes are set up; now check if any loadable modules
	 * want to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	mtx_lock_spin(&sched_lock);
	p2->p_state = PRS_NORMAL;

	/*
	 * If RFSTOPPED was not requested, make the child runnable and
	 * add it to the run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		TD_SET_CAN_RUN(td2);
		setrunqueue(td2, SRQ_BORING);
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * The parent can now be swapped again.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);

	PROC_UNLOCK(p1);

	/*
	 * Preserve the synchronization semantics of vfork.  P_PPWAIT was
	 * set on the child above; sleep on our proc until it is cleared
	 * (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);
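
	/*
	 * P_PPWAIT is cleared, and the parent woken, by the child when it
	 * execs or exits; only then does the msleep() loop above
	 * terminate and vfork() actually return in the parent.
	 */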

	/*
	 * If other threads are waiting, let them continue now.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
			uid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_destroy_proc(newproc);
#endif
	uma_zfree(proc_zone, newproc);
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
	tsleep(&forksleep, PUSER, "fork", hz / 2);
	return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td = curthread;
	p = td->td_proc;
	td->td_oncpu = PCPU_GET(cpuid);
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	sched_lock.mtx_lock = (uintptr_t)td;
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	cpu_critical_fork_exit();
	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
		td, td->td_sched, p->p_pid, p->p_comm);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler() intercepts this function call to have it
	 * call a function that does not return, so as to stay in kernel
	 * mode.  initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KTHREAD) {
		PROC_UNLOCK(p);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	PROC_UNLOCK(p);
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(struct thread *td, struct trapframe *frame)
{

	userret(td, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}