xref: /freebsd/sys/kern/kern_fork.c (revision 39beb93c3f8bdbf72a61fda42300b5ebed7390c8)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_kdtrace.h"
41 #include "opt_ktrace.h"
42 #include "opt_mac.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysproto.h>
47 #include <sys/eventhandler.h>
48 #include <sys/filedesc.h>
49 #include <sys/kernel.h>
50 #include <sys/kthread.h>
51 #include <sys/sysctl.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/jail.h>
58 #include <sys/pioctl.h>
59 #include <sys/resourcevar.h>
60 #include <sys/sched.h>
61 #include <sys/syscall.h>
62 #include <sys/vmmeter.h>
63 #include <sys/vnode.h>
64 #include <sys/acct.h>
65 #include <sys/ktr.h>
66 #include <sys/ktrace.h>
67 #include <sys/unistd.h>
68 #include <sys/sdt.h>
69 #include <sys/sx.h>
70 #include <sys/signalvar.h>
71 
72 #include <security/audit/audit.h>
73 #include <security/mac/mac_framework.h>
74 
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_extern.h>
79 #include <vm/uma.h>
80 
81 #ifdef KDTRACE_HOOKS
82 #include <sys/dtrace_bsd.h>
83 dtrace_fork_func_t	dtrace_fasttrap_fork;
84 #endif
85 
86 SDT_PROVIDER_DECLARE(proc);
87 SDT_PROBE_DEFINE(proc, kernel, , create);
88 SDT_PROBE_ARGTYPE(proc, kernel, , create, 0, "struct proc *");
89 SDT_PROBE_ARGTYPE(proc, kernel, , create, 1, "struct proc *");
90 SDT_PROBE_ARGTYPE(proc, kernel, , create, 2, "int");
91 
92 #ifndef _SYS_SYSPROTO_H_
93 struct fork_args {
94 	int     dummy;
95 };
96 #endif
97 
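/*
 * fork(2): create a new process.  This and the other fork-family system
 * calls below are thin wrappers that translate into RF* flags and hand
 * the real work to fork1().
 */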
98 /* ARGSUSED */
99 int
100 fork(td, uap)
101 	struct thread *td;
102 	struct fork_args *uap;
103 {
104 	int error;
105 	struct proc *p2;
106 
107 	error = fork1(td, RFFDG | RFPROC, 0, &p2);
108 	if (error == 0) {
109 		td->td_retval[0] = p2->p_pid;
110 		td->td_retval[1] = 0;
111 	}
112 	return (error);
113 }
114 
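/*
 * vfork(2): like fork(2), but the child normally borrows the parent's
 * address space (RFMEM) and the parent sleeps (RFPPWAIT) until the
 * child calls execve(2) or exits; the XEN case below drops both flags.
 */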
115 /* ARGSUSED */
116 int
117 vfork(td, uap)
118 	struct thread *td;
119 	struct vfork_args *uap;
120 {
121 	int error, flags;
122 	struct proc *p2;
123 
124 #ifdef XEN
125 	flags = RFFDG | RFPROC; /* validate that this is still an issue */
126 #else
127 	flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
128 #endif
129 	error = fork1(td, flags, 0, &p2);
130 	if (error == 0) {
131 		td->td_retval[0] = p2->p_pid;
132 		td->td_retval[1] = 0;
133 	}
134 	return (error);
135 }
136 
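/*
 * rfork(2): create a new process, or reconfigure resource sharing for
 * the current one, according to caller-supplied RF* flags.  For
 * example, rfork(RFPROC | RFFDG) is equivalent to fork(2), while
 * rfork(RFPROC | RFMEM) creates a child that shares the parent's
 * address space.
 */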
137 int
138 rfork(td, uap)
139 	struct thread *td;
140 	struct rfork_args *uap;
141 {
142 	struct proc *p2;
143 	int error;
144 
145 	/* Don't allow kernel-only flags. */
146 	if ((uap->flags & RFKERNELONLY) != 0)
147 		return (EINVAL);
148 
149 	AUDIT_ARG(fflags, uap->flags);
150 	error = fork1(td, uap->flags, 0, &p2);
151 	if (error == 0) {
152 		td->td_retval[0] = p2 ? p2->p_pid : 0;
153 		td->td_retval[1] = 0;
154 	}
155 	return (error);
156 }
157 
158 int	nprocs = 1;		/* process 0 */
159 int	lastpid = 0;
160 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
161     "Last used PID");
162 
163 /*
164  * Random component to lastpid generation.  We mix in a random factor to make
165  * it a little harder to predict.  We sanity check the modulus value to avoid
166  * checking it in critical paths.  Don't let it be too small or we
167  * pointlessly waste entropy, and don't let it be impossibly large: a
168  * modulus that is too big causes far more process table scans and slows
169  * down fork processing because the pidchecked caching is defeated.
170  */
171 static int randompid = 0;
172 
173 static int
174 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
175 {
176 	int error, pid;
177 
178 	error = sysctl_wire_old_buffer(req, sizeof(int));
179 	if (error != 0)
180 		return (error);
181 	sx_xlock(&allproc_lock);
182 	pid = randompid;
183 	error = sysctl_handle_int(oidp, &pid, 0, req);
184 	if (error == 0 && req->newptr != NULL) {
185 		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
186 			pid = PID_MAX - 100;
187 		else if (pid < 2)			/* NOP */
188 			pid = 0;
189 		else if (pid < 100)			/* Make it reasonable */
190 			pid = 100;
191 		randompid = pid;
192 	}
193 	sx_xunlock(&allproc_lock);
194 	return (error);
195 }
196 
197 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
198     0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
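
/*
 * For example, "sysctl kern.randompid=1000" adds a random offset in
 * [0, 1000) to the starting point of each PID search in fork1() below;
 * the handler above clamps the modulus to [100, PID_MAX - 100] and
 * treats values below 2 as "off".
 */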
199 
200 int
201 fork1(td, flags, pages, procp)
202 	struct thread *td;
203 	int flags;
204 	int pages;
205 	struct proc **procp;
206 {
207 	struct proc *p1, *p2, *pptr;
208 	struct proc *newproc;
209 	int ok, trypid;
210 	static int curfail, pidchecked = 0;
211 	static struct timeval lastfail;
212 	struct filedesc *fd;
213 	struct filedesc_to_leader *fdtol;
214 	struct thread *td2;
215 	struct sigacts *newsigacts;
216 	struct vmspace *vm2;
217 	int error;
218 
219 	/* Can't copy and clear. */
220 	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
221 		return (EINVAL);
222 
223 	p1 = td->td_proc;
224 
225 	/*
226 	 * Here we don't create a new process, but we divorce
227 	 * certain parts of a process from itself.
228 	 */
229 	if ((flags & RFPROC) == 0) {
230 		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
231 		    (flags & (RFCFDG | RFFDG))) {
232 			PROC_LOCK(p1);
233 			if (thread_single(SINGLE_BOUNDARY)) {
234 				PROC_UNLOCK(p1);
235 				return (ERESTART);
236 			}
237 			PROC_UNLOCK(p1);
238 		}
239 
240 		error = vm_forkproc(td, NULL, NULL, NULL, flags);
241 		if (error)
242 			goto norfproc_fail;
243 
244 		/*
245 		 * Close all file descriptors.
246 		 */
247 		if (flags & RFCFDG) {
248 			struct filedesc *fdtmp;
249 			fdtmp = fdinit(td->td_proc->p_fd);
250 			fdfree(td);
251 			p1->p_fd = fdtmp;
252 		}
253 
254 		/*
255 		 * Unshare file descriptors (from parent).
256 		 */
257 		if (flags & RFFDG)
258 			fdunshare(p1, td);
259 
260 norfproc_fail:
261 		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
262 		    (flags & (RFCFDG | RFFDG))) {
263 			PROC_LOCK(p1);
264 			thread_single_end();
265 			PROC_UNLOCK(p1);
266 		}
267 		*procp = NULL;
268 		return (error);
269 	}
270 
271 	/*
272 	 * XXX
273 	 * We used to have single-threading code here; however, it proved
274 	 * unneeded and caused problems.
275 	 */
276 
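	/*
	 * Do the expensive allocations (thread, alternate kstack, vmspace
	 * copy) up front, before any global locks are taken; failures
	 * unwind through the fail1 label below.
	 */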
277 	vm2 = NULL;
278 	/* Allocate new proc. */
279 	newproc = uma_zalloc(proc_zone, M_WAITOK);
280 	if (TAILQ_EMPTY(&newproc->p_threads)) {
281 		td2 = thread_alloc();
282 		if (td2 == NULL) {
283 			error = ENOMEM;
284 			goto fail1;
285 		}
286 		proc_linkup(newproc, td2);
287 	} else
288 		td2 = FIRST_THREAD_IN_PROC(newproc);
289 
290 	/* Allocate and switch to an alternate kstack if specified. */
291 	if (pages != 0) {
292 		if (!vm_thread_new_altkstack(td2, pages)) {
293 			error = ENOMEM;
294 			goto fail1;
295 		}
296 	}
297 	if ((flags & RFMEM) == 0) {
298 		vm2 = vmspace_fork(p1->p_vmspace);
299 		if (vm2 == NULL) {
300 			error = ENOMEM;
301 			goto fail1;
302 		}
303 	}
304 #ifdef MAC
305 	mac_proc_init(newproc);
306 #endif
307 	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
308 	STAILQ_INIT(&newproc->p_ktr);
309 
310 	/* We have to lock the process tree while we look for a pid. */
311 	sx_slock(&proctree_lock);
312 
313 	/*
314 	 * Although process entries are dynamically created, we still keep
315 	 * a global limit on the maximum number we will create.  Don't allow
316 	 * a nonprivileged user to use the last ten processes; don't let root
317 	 * exceed the limit. The variable nprocs is the current number of
318 	 * processes, maxproc is the limit.
319 	 */
320 	sx_xlock(&allproc_lock);
321 	if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
322 	    PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
323 		error = EAGAIN;
324 		goto fail;
325 	}
326 
327 	/*
328 	 * Increment the count of procs running with this uid. Don't allow
329 	 * a nonprivileged user to exceed their current limit.
330 	 *
331 	 * XXXRW: Can we avoid privilege here if it's not needed?
332 	 */
333 	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
334 	if (error == 0)
335 		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
336 	else {
337 		PROC_LOCK(p1);
338 		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
339 		    lim_cur(p1, RLIMIT_NPROC));
340 		PROC_UNLOCK(p1);
341 	}
342 	if (!ok) {
343 		error = EAGAIN;
344 		goto fail;
345 	}
346 
347 	/*
348 	 * Increment the nprocs resource before blocking can occur.  There
349 	 * are hard limits on the number of processes that can run.
350 	 */
351 	nprocs++;
352 
353 	/*
354 	 * Find an unused process ID.  We remember a range of unused IDs
355 	 * ready to use (from lastpid+1 through pidchecked-1).
356 	 *
357 	 * If RFHIGHPID is set (used during system boot), do not allocate
358 	 * low-numbered pids.
359 	 */
360 	trypid = lastpid + 1;
361 	if (flags & RFHIGHPID) {
362 		if (trypid < 10)
363 			trypid = 10;
364 	} else {
365 		if (randompid)
366 			trypid += arc4random() % randompid;
367 	}
368 retry:
369 	/*
370 	 * If the process ID prototype has wrapped around,
371 	 * restart somewhat above 0, as the low-numbered procs
372 	 * tend to include daemons that don't exit.
373 	 */
374 	if (trypid >= PID_MAX) {
375 		trypid = trypid % PID_MAX;
376 		if (trypid < 100)
377 			trypid += 100;
378 		pidchecked = 0;
379 	}
380 	if (trypid >= pidchecked) {
381 		int doingzomb = 0;
382 
383 		pidchecked = PID_MAX;
384 		/*
385 		 * Scan the active and zombie procs to check whether this pid
386 		 * is in use.  Remember the lowest pid that's greater
387 		 * than trypid, so we can avoid checking for a while.
388 		 */
389 		p2 = LIST_FIRST(&allproc);
390 again:
391 		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
392 			while (p2->p_pid == trypid ||
393 			    (p2->p_pgrp != NULL &&
394 			    (p2->p_pgrp->pg_id == trypid ||
395 			    (p2->p_session != NULL &&
396 			    p2->p_session->s_sid == trypid)))) {
397 				trypid++;
398 				if (trypid >= pidchecked)
399 					goto retry;
400 			}
401 			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
402 				pidchecked = p2->p_pid;
403 			if (p2->p_pgrp != NULL) {
404 				if (p2->p_pgrp->pg_id > trypid &&
405 				    pidchecked > p2->p_pgrp->pg_id)
406 					pidchecked = p2->p_pgrp->pg_id;
407 				if (p2->p_session != NULL &&
408 				    p2->p_session->s_sid > trypid &&
409 				    pidchecked > p2->p_session->s_sid)
410 					pidchecked = p2->p_session->s_sid;
411 			}
412 		}
413 		if (!doingzomb) {
414 			doingzomb = 1;
415 			p2 = LIST_FIRST(&zombproc);
416 			goto again;
417 		}
418 	}
419 	sx_sunlock(&proctree_lock);
420 
421 	/*
422 	 * RFHIGHPID does not mess with the lastpid counter during boot.
423 	 */
424 	if (flags & RFHIGHPID)
425 		pidchecked = 0;
426 	else
427 		lastpid = trypid;
428 
429 	p2 = newproc;
430 	p2->p_state = PRS_NEW;		/* protect against others */
431 	p2->p_pid = trypid;
432 	/*
433 	 * Allow the scheduler to initialize the child.
434 	 */
435 	thread_lock(td);
436 	sched_fork(td, td2);
437 	thread_unlock(td);
438 	AUDIT_ARG(pid, p2->p_pid);
439 	LIST_INSERT_HEAD(&allproc, p2, p_list);
440 	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
441 
442 	PROC_LOCK(p2);
443 	PROC_LOCK(p1);
444 
445 	sx_xunlock(&allproc_lock);
446 
447 	bcopy(&p1->p_startcopy, &p2->p_startcopy,
448 	    __rangeof(struct proc, p_startcopy, p_endcopy));
449 	pargs_hold(p2->p_args);
450 	PROC_UNLOCK(p1);
451 
452 	bzero(&p2->p_startzero,
453 	    __rangeof(struct proc, p_startzero, p_endzero));
454 
455 	p2->p_ucred = crhold(td->td_ucred);
456 
457 	/* In case we are jailed, tell the prison that we exist. */
458 	if (jailed(p2->p_ucred))
459 		prison_proc_hold(p2->p_ucred->cr_prison);
460 
461 	PROC_UNLOCK(p2);
462 
463 	/*
464 	 * Malloc things while we don't hold any locks.
465 	 */
466 	if (flags & RFSIGSHARE)
467 		newsigacts = NULL;
468 	else
469 		newsigacts = sigacts_alloc();
470 
471 	/*
472 	 * Copy filedesc.
473 	 */
474 	if (flags & RFCFDG) {
475 		fd = fdinit(p1->p_fd);
476 		fdtol = NULL;
477 	} else if (flags & RFFDG) {
478 		fd = fdcopy(p1->p_fd);
479 		fdtol = NULL;
480 	} else {
481 		fd = fdshare(p1->p_fd);
482 		if (p1->p_fdtol == NULL)
483 			p1->p_fdtol =
484 				filedesc_to_leader_alloc(NULL,
485 							 NULL,
486 							 p1->p_leader);
487 		if ((flags & RFTHREAD) != 0) {
488 			/*
489 			 * Shared file descriptor table and
490 			 * shared process leaders.
491 			 */
492 			fdtol = p1->p_fdtol;
493 			FILEDESC_XLOCK(p1->p_fd);
494 			fdtol->fdl_refcount++;
495 			FILEDESC_XUNLOCK(p1->p_fd);
496 		} else {
497 			/*
498 			 * Shared file descriptor table, and
499 			 * different process leaders
500 			 */
501 			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
502 							 p1->p_fd,
503 							 p2);
504 		}
505 	}
506 	/*
507 	 * Make a proc table entry for the new process.
508 	 * Start by zeroing the section of proc that is zero-initialized,
509 	 * then copy the section that is copied directly from the parent.
510 	 */
511 
512 	PROC_LOCK(p2);
513 	PROC_LOCK(p1);
514 
515 	bzero(&td2->td_startzero,
516 	    __rangeof(struct thread, td_startzero, td_endzero));
517 
518 	bcopy(&td->td_startcopy, &td2->td_startcopy,
519 	    __rangeof(struct thread, td_startcopy, td_endcopy));
520 
521 	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
522 	td2->td_sigstk = td->td_sigstk;
523 	td2->td_sigmask = td->td_sigmask;
524 	td2->td_flags = TDF_INMEM;
525 
526 	/*
527 	 * Duplicate sub-structures as needed.
528 	 * Increase reference counts on shared objects.
529 	 */
530 	p2->p_flag = P_INMEM;
531 	p2->p_swtick = ticks;
532 	if (p1->p_flag & P_PROFIL)
533 		startprofclock(p2);
534 	td2->td_ucred = crhold(p2->p_ucred);
535 
536 	if (flags & RFSIGSHARE) {
537 		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
538 	} else {
539 		sigacts_copy(newsigacts, p1->p_sigacts);
540 		p2->p_sigacts = newsigacts;
541 	}
542 	if (flags & RFLINUXTHPN)
543 	        p2->p_sigparent = SIGUSR1;
544 	else
545 	        p2->p_sigparent = SIGCHLD;
546 
547 	p2->p_textvp = p1->p_textvp;
548 	p2->p_fd = fd;
549 	p2->p_fdtol = fdtol;
550 
551 	/*
552 	 * p_limit is copy-on-write.  Bump its refcount.
553 	 */
554 	lim_fork(p1, p2);
555 
556 	pstats_fork(p1->p_stats, p2->p_stats);
557 
558 	PROC_UNLOCK(p1);
559 	PROC_UNLOCK(p2);
560 
561 	/* Bump references to the text vnode (for procfs) */
562 	if (p2->p_textvp)
563 		vref(p2->p_textvp);
564 
565 	/*
566 	 * Set up linkage for kernel based threading.
567 	 */
568 	if ((flags & RFTHREAD) != 0) {
569 		mtx_lock(&ppeers_lock);
570 		p2->p_peers = p1->p_peers;
571 		p1->p_peers = p2;
572 		p2->p_leader = p1->p_leader;
573 		mtx_unlock(&ppeers_lock);
574 		PROC_LOCK(p1->p_leader);
575 		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
576 			PROC_UNLOCK(p1->p_leader);
577 			/*
578 			 * The task leader is exiting, so process p1 is
579 			 * going to be killed shortly.  Since p1 obviously
580 			 * isn't dead yet, we know that the leader is either
581 			 * sending SIGKILL's to all the processes in this
582 			 * task or is sleeping waiting for all the peers to
583 			 * exit.  We let p1 complete the fork, but we need
584 			 * to go ahead and kill the new process p2 since
585 			 * the task leader may not get a chance to send
586 			 * SIGKILL to it.  We leave it on the list so that
587 			 * the task leader will wait for this new process
588 			 * to commit suicide.
589 			 */
590 			PROC_LOCK(p2);
591 			psignal(p2, SIGKILL);
592 			PROC_UNLOCK(p2);
593 		} else
594 			PROC_UNLOCK(p1->p_leader);
595 	} else {
596 		p2->p_peers = NULL;
597 		p2->p_leader = p2;
598 	}
599 
600 	sx_xlock(&proctree_lock);
601 	PGRP_LOCK(p1->p_pgrp);
602 	PROC_LOCK(p2);
603 	PROC_LOCK(p1);
604 
605 	/*
606 	 * Preserve some more flags in subprocess.  P_PROFIL has already
607 	 * been preserved.
608 	 */
609 	p2->p_flag |= p1->p_flag & P_SUGID;
610 	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
611 	SESS_LOCK(p1->p_session);
612 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
613 		p2->p_flag |= P_CONTROLT;
614 	SESS_UNLOCK(p1->p_session);
615 	if (flags & RFPPWAIT)
616 		p2->p_flag |= P_PPWAIT;
617 
618 	p2->p_pgrp = p1->p_pgrp;
619 	LIST_INSERT_AFTER(p1, p2, p_pglist);
620 	PGRP_UNLOCK(p1->p_pgrp);
621 	LIST_INIT(&p2->p_children);
622 
623 	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);
624 
625 #ifdef KTRACE
626 	/*
627 	 * Copy traceflag and tracefile if enabled.
628 	 */
629 	mtx_lock(&ktrace_mtx);
630 	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
631 	if (p1->p_traceflag & KTRFAC_INHERIT) {
632 		p2->p_traceflag = p1->p_traceflag;
633 		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
634 			VREF(p2->p_tracevp);
635 			KASSERT(p1->p_tracecred != NULL,
636 			    ("ktrace vnode with no cred"));
637 			p2->p_tracecred = crhold(p1->p_tracecred);
638 		}
639 	}
640 	mtx_unlock(&ktrace_mtx);
641 #endif
642 
643 	/*
644 	 * If PF_FORK is set, the child process inherits the
645 	 * procfs ioctl flags from its parent.
646 	 */
647 	if (p1->p_pfsflags & PF_FORK) {
648 		p2->p_stops = p1->p_stops;
649 		p2->p_pfsflags = p1->p_pfsflags;
650 	}
651 
652 #ifdef KDTRACE_HOOKS
653 	/*
654 	 * Tell the DTrace fasttrap provider about the new process
655 	 * if it has registered an interest.
656 	 */
657 	if (dtrace_fasttrap_fork)
658 		dtrace_fasttrap_fork(p1, p2);
659 #endif
660 
661 	/*
662 	 * This begins the section where we must prevent the parent
663 	 * from being swapped.
664 	 */
665 	_PHOLD(p1);
666 	PROC_UNLOCK(p1);
667 
668 	/*
669 	 * Attach the new process to its parent.
670 	 *
671 	 * If RFNOWAIT is set, the newly created process becomes a child
672 	 * of init.  This effectively disassociates the child from the
673 	 * parent.
674 	 */
675 	if (flags & RFNOWAIT)
676 		pptr = initproc;
677 	else
678 		pptr = p1;
679 	p2->p_pptr = pptr;
680 	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
681 	sx_xunlock(&proctree_lock);
682 
683 	/* Inform accounting that we have forked. */
684 	p2->p_acflag = AFORK;
685 	PROC_UNLOCK(p2);
686 
687 	/*
688 	 * Finish creating the child process.  It will return via a different
689 	 * execution path later (i.e., directly into user mode).
690 	 */
691 	vm_forkproc(td, p2, td2, vm2, flags);
692 
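	/*
	 * Charge the new process to the matching per-CPU counter: the two
	 * flag combinations below are exactly those used by fork(2) and
	 * vfork(2) above, forks of proc0 are kernel threads, and anything
	 * else counts as an rfork.
	 */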
693 	if (flags == (RFFDG | RFPROC)) {
694 		PCPU_INC(cnt.v_forks);
695 		PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
696 		    p2->p_vmspace->vm_ssize);
697 	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
698 		PCPU_INC(cnt.v_vforks);
699 		PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
700 		    p2->p_vmspace->vm_ssize);
701 	} else if (p1 == &proc0) {
702 		PCPU_INC(cnt.v_kthreads);
703 		PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
704 		    p2->p_vmspace->vm_ssize);
705 	} else {
706 		PCPU_INC(cnt.v_rforks);
707 		PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
708 		    p2->p_vmspace->vm_ssize);
709 	}
710 
711 	/*
712 	 * Both processes are set up; now check if any loadable modules want
713 	 * to adjust anything.
714 	 *   What if they have an error? XXX
715 	 */
716 	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
717 
718 	/*
719 	 * Set the child start time and mark the process as being complete.
720 	 */
721 	microuptime(&p2->p_stats->p_start);
722 	PROC_SLOCK(p2);
723 	p2->p_state = PRS_NORMAL;
724 	PROC_SUNLOCK(p2);
725 
726 	/*
727 	 * If RFSTOPPED was not requested, make the child runnable and add
728 	 * it to the run queue.
729 	 */
730 	if ((flags & RFSTOPPED) == 0) {
731 		thread_lock(td2);
732 		TD_SET_CAN_RUN(td2);
733 		sched_add(td2, SRQ_BORING);
734 		thread_unlock(td2);
735 	}
736 
737 	/*
738 	 * The parent can now be swapped again.
739 	 */
740 	PROC_LOCK(p1);
741 	_PRELE(p1);
742 	PROC_UNLOCK(p1);
743 
744 	/*
745 	 * Tell any interested parties about the new process.
746 	 */
747 	knote_fork(&p1->p_klist, p2->p_pid);
748 	SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);
749 
750 	/*
751 	 * Preserve the synchronization semantics of vfork: if RFPPWAIT was
752 	 * requested, P_PPWAIT was set on the child above, so sleep here
753 	 * until the child clears it when it execs or exits.
754 	 */
755 	PROC_LOCK(p2);
756 	while (p2->p_flag & P_PPWAIT)
757 		cv_wait(&p2->p_pwait, &p2->p_mtx);
758 	PROC_UNLOCK(p2);
759 
760 	/*
761 	 * Return child proc pointer to parent.
762 	 */
763 	*procp = p2;
764 	return (0);
765 fail:
766 	sx_sunlock(&proctree_lock);
767 	if (ppsratecheck(&lastfail, &curfail, 1))
768 		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
769 		    td->td_ucred->cr_ruid);
770 	sx_xunlock(&allproc_lock);
771 #ifdef MAC
772 	mac_proc_destroy(newproc);
773 #endif
774 fail1:
775 	if (vm2 != NULL)
776 		vmspace_free(vm2);
777 	uma_zfree(proc_zone, newproc);
778 	pause("fork", hz / 2);
779 	return (error);
780 }
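
/*
 * Note that in-kernel consumers also call fork1() directly; for
 * instance, kproc_create() passes RFSTOPPED so that the new kernel
 * process stays off the run queue until its caller has finished
 * setting it up.
 */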
781 
782 /*
783  * Handle the return of a child process from fork1().  This function
784  * is called from the MD fork_trampoline() entry point.
785  */
786 void
787 fork_exit(callout, arg, frame)
788 	void (*callout)(void *, struct trapframe *);
789 	void *arg;
790 	struct trapframe *frame;
791 {
792 	struct proc *p;
793 	struct thread *td;
794 	struct thread *dtd;
795 
796 	td = curthread;
797 	p = td->td_proc;
798 	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
799 
800 	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
801 		td, td->td_sched, p->p_pid, td->td_name);
802 
803 	sched_fork_exit(td);
804 	/*
805 	 * Processes normally resume in mi_switch() after being
806 	 * cpu_switch()'ed to, but when children start up they arrive here
807 	 * instead, so we must do much the same things as mi_switch() would.
808 	 */
809 	if ((dtd = PCPU_GET(deadthread))) {
810 		PCPU_SET(deadthread, NULL);
811 		thread_stash(dtd);
812 	}
813 	thread_unlock(td);
814 
815 	/*
816 	 * cpu_set_fork_handler() intercepts this function call to have it
817 	 * invoke a non-returning function instead, so the thread stays in
818 	 * kernel mode.  initproc has its own fork handler, but it does return.
819 	 */
820 	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
821 	callout(arg, frame);
822 
823 	/*
824 	 * Check if a kernel thread misbehaved and returned from its main
825 	 * function.
826 	 */
827 	if (p->p_flag & P_KTHREAD) {
828 		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
829 		    td->td_name, p->p_pid);
830 		kproc_exit(0);
831 	}
832 	mtx_assert(&Giant, MA_NOTOWNED);
833 
834 	EVENTHANDLER_INVOKE(schedtail, p);
835 }
836 
837 /*
838  * Simplified back end of syscall(), used when returning from fork()
839  * directly into user mode.  Giant is not held on entry, and must not
840  * be held on return.  This function is passed in to fork_exit() as the
841  * first parameter and is called when returning to a new userland process.
842  */
843 void
844 fork_return(td, frame)
845 	struct thread *td;
846 	struct trapframe *frame;
847 {
848 
849 	userret(td, frame);
850 #ifdef KTRACE
851 	if (KTRPOINT(td, KTR_SYSRET))
852 		ktrsysret(SYS_fork, 0, 0);
853 #endif
854 	mtx_assert(&Giant, MA_NOTOWNED);
855 }
856