xref: /freebsd/sys/kern/kern_fork.c (revision 129d3046ef0427d3b22b78a71f3494854d817fba)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include "opt_kdtrace.h"
41 #include "opt_ktrace.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/sysproto.h>
46 #include <sys/eventhandler.h>
47 #include <sys/filedesc.h>
48 #include <sys/jail.h>
49 #include <sys/kernel.h>
50 #include <sys/kthread.h>
51 #include <sys/sysctl.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/pioctl.h>
58 #include <sys/resourcevar.h>
59 #include <sys/sched.h>
60 #include <sys/syscall.h>
61 #include <sys/vmmeter.h>
62 #include <sys/vnode.h>
63 #include <sys/acct.h>
64 #include <sys/ktr.h>
65 #include <sys/ktrace.h>
66 #include <sys/unistd.h>
67 #include <sys/sdt.h>
68 #include <sys/sx.h>
69 #include <sys/signalvar.h>
70 #include <sys/vimage.h>
71 
72 #include <security/audit/audit.h>
73 #include <security/mac/mac_framework.h>
74 
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_extern.h>
79 #include <vm/uma.h>
80 
81 #ifdef KDTRACE_HOOKS
82 #include <sys/dtrace_bsd.h>
83 dtrace_fork_func_t	dtrace_fasttrap_fork;
84 #endif
85 
86 SDT_PROVIDER_DECLARE(proc);
87 SDT_PROBE_DEFINE(proc, kernel, , create);
88 SDT_PROBE_ARGTYPE(proc, kernel, , create, 0, "struct proc *");
89 SDT_PROBE_ARGTYPE(proc, kernel, , create, 1, "struct proc *");
90 SDT_PROBE_ARGTYPE(proc, kernel, , create, 2, "int");
91 
92 #ifndef _SYS_SYSPROTO_H_
93 struct fork_args {
94 	int     dummy;
95 };
96 #endif
97 
98 /* ARGSUSED */
99 int
100 fork(td, uap)
101 	struct thread *td;
102 	struct fork_args *uap;
103 {
104 	int error;
105 	struct proc *p2;
106 
107 	error = fork1(td, RFFDG | RFPROC, 0, &p2);
108 	if (error == 0) {
109 		td->td_retval[0] = p2->p_pid;
110 		td->td_retval[1] = 0;
111 	}
112 	return (error);
113 }
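
/*
 * For illustration: the two td_retval slots above are why fork() appears to
 * "return twice".  The parent's syscall return value is the new child's pid;
 * the child, which enters user mode through fork_exit()/fork_return() below,
 * is handed a return value of 0 by the machine-dependent cpu_fork() code
 * rather than here.  A typical userland caller (sketch only):
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		... in the child ...
 *	else if (pid > 0)
 *		... in the parent; pid is the child's process id ...
 */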
114 
115 /* ARGSUSED */
116 int
117 vfork(td, uap)
118 	struct thread *td;
119 	struct vfork_args *uap;
120 {
121 	int error, flags;
122 	struct proc *p2;
123 
124 #ifdef XEN
125 	flags = RFFDG | RFPROC; /* validate that this is still an issue */
126 #else
127 	flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
128 #endif
129 	error = fork1(td, flags, 0, &p2);
130 	if (error == 0) {
131 		td->td_retval[0] = p2->p_pid;
132 		td->td_retval[1] = 0;
133 	}
134 	return (error);
135 }
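
/*
 * Note on the flag choice above: RFMEM shares the parent's vmspace with the
 * child instead of copying it, and RFPPWAIT makes the parent sleep at the
 * bottom of fork1() until the child execs or exits; together they give the
 * traditional vfork() semantics.  The XEN case deliberately falls back to
 * plain fork() behaviour, as the inline comment suggests.
 */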
136 
137 int
138 rfork(td, uap)
139 	struct thread *td;
140 	struct rfork_args *uap;
141 {
142 	struct proc *p2;
143 	int error;
144 
145 	/* Don't allow kernel-only flags. */
146 	if ((uap->flags & RFKERNELONLY) != 0)
147 		return (EINVAL);
148 
149 	AUDIT_ARG(fflags, uap->flags);
150 	error = fork1(td, uap->flags, 0, &p2);
151 	if (error == 0) {
152 		td->td_retval[0] = p2 ? p2->p_pid : 0;
153 		td->td_retval[1] = 0;
154 	}
155 	return (error);
156 }
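
/*
 * A userland usage sketch (illustrative flags only): create a child that
 * shares the address space but gets its own copy of the descriptor table,
 * reparented to init so the caller need not wait for it:
 *
 *	pid_t pid = rfork(RFPROC | RFMEM | RFFDG | RFNOWAIT);
 *	if (pid == -1)
 *		err(1, "rfork");
 *
 * Omitting RFPROC is also legal; that case never creates a child and is
 * handled by the (flags & RFPROC) == 0 branch near the top of fork1().
 */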
157 
158 int	nprocs = 1;		/* process 0 */
159 int	lastpid = 0;
160 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
161     "Last used PID");
162 
163 /*
164  * Random component to lastpid generation.  We mix in a random factor to make
165  * it a little harder to predict.  We sanity check the modulus value here so
166  * that it need not be checked in critical paths.  Don't let it be too small
167  * or we pointlessly waste entropy, and don't let it be impossibly large: a
168  * modulus that is too big causes many more process table scans and slows
169  * down fork processing because the pidchecked caching is defeated.
170  */
171 static int randompid = 0;
172 
173 static int
174 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
175 {
176 	int error, pid;
177 
178 	error = sysctl_wire_old_buffer(req, sizeof(int));
179 	if (error != 0)
180 		return(error);
181 	sx_xlock(&allproc_lock);
182 	pid = randompid;
183 	error = sysctl_handle_int(oidp, &pid, 0, req);
184 	if (error == 0 && req->newptr != NULL) {
185 		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
186 			pid = PID_MAX - 100;
187 		else if (pid < 2)			/* NOP */
188 			pid = 0;
189 		else if (pid < 100)			/* Make it reasonable */
190 			pid = 100;
191 		randompid = pid;
192 	}
193 	sx_xunlock(&allproc_lock);
194 	return (error);
195 }
196 
197 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
198     0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
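
/*
 * Worked example of the modulus (the arithmetic lives in fork1() below):
 * with kern.randompid set to 100, each fork starts its PID search at
 * lastpid + 1 + (arc4random() % 100), so consecutive children no longer
 * receive strictly consecutive PIDs.  The default of 0 disables the
 * randomization:
 *
 *	sysctl kern.randompid=100
 */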
199 
200 int
201 fork1(td, flags, pages, procp)
202 	struct thread *td;
203 	int flags;
204 	int pages;
205 	struct proc **procp;
206 {
207 	struct proc *p1, *p2, *pptr;
208 	struct proc *newproc;
209 	int ok, trypid;
210 	static int curfail, pidchecked = 0;
211 	static struct timeval lastfail;
212 	struct filedesc *fd;
213 	struct filedesc_to_leader *fdtol;
214 	struct thread *td2;
215 	struct sigacts *newsigacts;
216 	struct vmspace *vm2;
217 	int error;
218 
219 	/* Can't copy and clear. */
220 	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
221 		return (EINVAL);
222 
223 	p1 = td->td_proc;
224 
225 	/*
226 	 * Here we don't create a new process, but we divorce
227 	 * certain parts of a process from itself.
228 	 */
229 	if ((flags & RFPROC) == 0) {
230 		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
231 		    (flags & (RFCFDG | RFFDG))) {
232 			PROC_LOCK(p1);
233 			if (thread_single(SINGLE_BOUNDARY)) {
234 				PROC_UNLOCK(p1);
235 				return (ERESTART);
236 			}
237 			PROC_UNLOCK(p1);
238 		}
239 
240 		error = vm_forkproc(td, NULL, NULL, NULL, flags);
241 		if (error)
242 			goto norfproc_fail;
243 
244 		/*
245 		 * Close all file descriptors.
246 		 */
247 		if (flags & RFCFDG) {
248 			struct filedesc *fdtmp;
249 			fdtmp = fdinit(td->td_proc->p_fd);
250 			fdfree(td);
251 			p1->p_fd = fdtmp;
252 		}
253 
254 		/*
255 		 * Unshare file descriptors (from parent).
256 		 */
257 		if (flags & RFFDG)
258 			fdunshare(p1, td);
259 
260 norfproc_fail:
261 		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
262 		    (flags & (RFCFDG | RFFDG))) {
263 			PROC_LOCK(p1);
264 			thread_single_end();
265 			PROC_UNLOCK(p1);
266 		}
267 		*procp = NULL;
268 		return (error);
269 	}
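
	/*
	 * For illustration: the branch above is taken for calls such as
	 * rfork(RFFDG) or rfork(RFCFDG), where no child is created and the
	 * calling process merely unshares (RFFDG) or replaces (RFCFDG) its
	 * descriptor table, single-threading first if the process has had
	 * multiple threads so the table can be swapped out safely.
	 */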
270 
271 	/*
272 	 * XXX
273 	 * We used to have single-threading code here,
274 	 * but it proved unneeded and caused problems.
275 	 */
276 
277 	vm2 = NULL;
278 	/* Allocate new proc. */
279 	newproc = uma_zalloc(proc_zone, M_WAITOK);
280 	if (TAILQ_EMPTY(&newproc->p_threads)) {
281 		td2 = thread_alloc();
282 		if (td2 == NULL) {
283 			error = ENOMEM;
284 			goto fail1;
285 		}
286 		proc_linkup(newproc, td2);
287 	} else
288 		td2 = FIRST_THREAD_IN_PROC(newproc);
289 
290 	/* Allocate and switch to an alternate kstack if specified. */
291 	if (pages != 0) {
292 		if (!vm_thread_new_altkstack(td2, pages)) {
293 			error = ENOMEM;
294 			goto fail1;
295 		}
296 	}
297 	if ((flags & RFMEM) == 0) {
298 		vm2 = vmspace_fork(p1->p_vmspace);
299 		if (vm2 == NULL) {
300 			error = ENOMEM;
301 			goto fail1;
302 		}
303 	}
304 #ifdef MAC
305 	mac_proc_init(newproc);
306 #endif
307 	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
308 	STAILQ_INIT(&newproc->p_ktr);
309 
310 	/* We have to lock the process tree while we look for a pid. */
311 	sx_slock(&proctree_lock);
312 
313 	/*
314 	 * Although process entries are dynamically created, we still keep
315 	 * a global limit on the maximum number we will create.  Don't allow
316 	 * a nonprivileged user to use the last ten processes; don't let root
317 	 * exceed the limit. The variable nprocs is the current number of
318 	 * processes, maxproc is the limit.
319 	 */
320 	sx_xlock(&allproc_lock);
321 	if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
322 	    PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
323 		error = EAGAIN;
324 		goto fail;
325 	}
326 
327 	/*
328 	 * Increment the count of procs running with this uid. Don't allow
329 	 * a nonprivileged user to exceed their current limit.
330 	 *
331 	 * XXXRW: Can we avoid privilege here if it's not needed?
332 	 */
333 	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
334 	if (error == 0)
335 		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
336 	else {
337 		PROC_LOCK(p1);
338 		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
339 		    lim_cur(p1, RLIMIT_NPROC));
340 		PROC_UNLOCK(p1);
341 	}
342 	if (!ok) {
343 		error = EAGAIN;
344 		goto fail;
345 	}
346 
347 	/*
348 	 * Increment the nprocs resource before blocking can occur.  There
349 	 * are hard limits on the number of processes that can run.
350 	 */
351 	nprocs++;
352 #ifdef VIMAGE
353 	P_TO_VPROCG(p1)->nprocs++;
354 #endif
355 
356 	/*
357 	 * Find an unused process ID.  We remember a range of unused IDs
358 	 * ready to use (from lastpid+1 through pidchecked-1).
359 	 *
360 	 * If RFHIGHPID is set (used during system boot), do not allocate
361 	 * low-numbered pids.
362 	 */
363 	trypid = lastpid + 1;
364 	if (flags & RFHIGHPID) {
365 		if (trypid < 10)
366 			trypid = 10;
367 	} else {
368 		if (randompid)
369 			trypid += arc4random() % randompid;
370 	}
371 retry:
372 	/*
373 	 * If the process ID prototype has wrapped around,
374 	 * restart somewhat above 0, as the low-numbered procs
375 	 * tend to include daemons that don't exit.
376 	 */
377 	if (trypid >= PID_MAX) {
378 		trypid = trypid % PID_MAX;
379 		if (trypid < 100)
380 			trypid += 100;
381 		pidchecked = 0;
382 	}
383 	if (trypid >= pidchecked) {
384 		int doingzomb = 0;
385 
386 		pidchecked = PID_MAX;
387 		/*
388 		 * Scan the active and zombie procs to check whether this pid
389 		 * is in use.  Remember the lowest pid that's greater
390 		 * than trypid, so we can avoid checking for a while.
391 		 */
392 		p2 = LIST_FIRST(&allproc);
393 again:
394 		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
395 			while (p2->p_pid == trypid ||
396 			    (p2->p_pgrp != NULL &&
397 			    (p2->p_pgrp->pg_id == trypid ||
398 			    (p2->p_session != NULL &&
399 			    p2->p_session->s_sid == trypid)))) {
400 				trypid++;
401 				if (trypid >= pidchecked)
402 					goto retry;
403 			}
404 			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
405 				pidchecked = p2->p_pid;
406 			if (p2->p_pgrp != NULL) {
407 				if (p2->p_pgrp->pg_id > trypid &&
408 				    pidchecked > p2->p_pgrp->pg_id)
409 					pidchecked = p2->p_pgrp->pg_id;
410 				if (p2->p_session != NULL &&
411 				    p2->p_session->s_sid > trypid &&
412 				    pidchecked > p2->p_session->s_sid)
413 					pidchecked = p2->p_session->s_sid;
414 			}
415 		}
416 		if (!doingzomb) {
417 			doingzomb = 1;
418 			p2 = LIST_FIRST(&zombproc);
419 			goto again;
420 		}
421 	}
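
	/*
	 * A worked example of the caching above: if trypid is 100 and the
	 * lowest pid, process group id or session id in use above 100 is
	 * 150, pidchecked ends up as 150, and the next 49 forks (pids 101
	 * through 149) skip the scan of the allproc and zombproc lists
	 * entirely.
	 */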
422 	sx_sunlock(&proctree_lock);
423 
424 	/*
425 	 * RFHIGHPID does not mess with the lastpid counter during boot.
426 	 */
427 	if (flags & RFHIGHPID)
428 		pidchecked = 0;
429 	else
430 		lastpid = trypid;
431 
432 	p2 = newproc;
433 	p2->p_state = PRS_NEW;		/* protect against others */
434 	p2->p_pid = trypid;
435 	/*
436 	 * Allow the scheduler to initialize the child.
437 	 */
438 	thread_lock(td);
439 	sched_fork(td, td2);
440 	thread_unlock(td);
441 	AUDIT_ARG(pid, p2->p_pid);
442 	LIST_INSERT_HEAD(&allproc, p2, p_list);
443 	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
444 
445 	PROC_LOCK(p2);
446 	PROC_LOCK(p1);
447 
448 	sx_xunlock(&allproc_lock);
449 
450 	bcopy(&p1->p_startcopy, &p2->p_startcopy,
451 	    __rangeof(struct proc, p_startcopy, p_endcopy));
452 	pargs_hold(p2->p_args);
453 	PROC_UNLOCK(p1);
454 
455 	bzero(&p2->p_startzero,
456 	    __rangeof(struct proc, p_startzero, p_endzero));
457 
458 	p2->p_ucred = crhold(td->td_ucred);
459 
460 	/* Tell the prison that we exist. */
461 	prison_proc_hold(p2->p_ucred->cr_prison);
462 
463 	PROC_UNLOCK(p2);
464 
465 	/*
466 	 * Malloc things while we don't hold any locks.
467 	 */
468 	if (flags & RFSIGSHARE)
469 		newsigacts = NULL;
470 	else
471 		newsigacts = sigacts_alloc();
472 
473 	/*
474 	 * Copy filedesc.
475 	 */
476 	if (flags & RFCFDG) {
477 		fd = fdinit(p1->p_fd);
478 		fdtol = NULL;
479 	} else if (flags & RFFDG) {
480 		fd = fdcopy(p1->p_fd);
481 		fdtol = NULL;
482 	} else {
483 		fd = fdshare(p1->p_fd);
484 		if (p1->p_fdtol == NULL)
485 			p1->p_fdtol =
486 				filedesc_to_leader_alloc(NULL,
487 							 NULL,
488 							 p1->p_leader);
489 		if ((flags & RFTHREAD) != 0) {
490 			/*
491 			 * Shared file descriptor table and
492 			 * shared process leaders.
493 			 */
494 			fdtol = p1->p_fdtol;
495 			FILEDESC_XLOCK(p1->p_fd);
496 			fdtol->fdl_refcount++;
497 			FILEDESC_XUNLOCK(p1->p_fd);
498 		} else {
499 			/*
500 			 * Shared file descriptor table, and
501 			 * different process leaders
502 			 */
503 			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
504 							 p1->p_fd,
505 							 p2);
506 		}
507 	}
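
	/*
	 * To summarize the three cases above: RFCFDG gives the child a fresh
	 * descriptor table, RFFDG gives it a private copy of the parent's,
	 * and the default is to share the parent's table, in which case the
	 * filedesc_to_leader bookkeeping above records which process leads
	 * the group sharing that table (relevant for RFTHREAD-style peers).
	 */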
508 	/*
509 	 * Make a proc table entry for the new process.
510 	 * Start by zeroing the section of proc that is zero-initialized,
511 	 * then copy the section that is copied directly from the parent.
512 	 */
513 
514 	PROC_LOCK(p2);
515 	PROC_LOCK(p1);
516 
517 	bzero(&td2->td_startzero,
518 	    __rangeof(struct thread, td_startzero, td_endzero));
519 
520 	bcopy(&td->td_startcopy, &td2->td_startcopy,
521 	    __rangeof(struct thread, td_startcopy, td_endcopy));
522 
523 	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
524 	td2->td_sigstk = td->td_sigstk;
525 	td2->td_sigmask = td->td_sigmask;
526 	td2->td_flags = TDF_INMEM;
527 
528 #ifdef VIMAGE
529 	td2->td_vnet = NULL;
530 	td2->td_vnet_lpush = NULL;
531 #endif
532 
533 	/*
534 	 * Duplicate sub-structures as needed.
535 	 * Increase reference counts on shared objects.
536 	 */
537 	p2->p_flag = P_INMEM;
538 	p2->p_swtick = ticks;
539 	if (p1->p_flag & P_PROFIL)
540 		startprofclock(p2);
541 	td2->td_ucred = crhold(p2->p_ucred);
542 
543 	if (flags & RFSIGSHARE) {
544 		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
545 	} else {
546 		sigacts_copy(newsigacts, p1->p_sigacts);
547 		p2->p_sigacts = newsigacts;
548 	}
549 	if (flags & RFLINUXTHPN)
550 	        p2->p_sigparent = SIGUSR1;
551 	else
552 	        p2->p_sigparent = SIGCHLD;
553 
554 	p2->p_textvp = p1->p_textvp;
555 	p2->p_fd = fd;
556 	p2->p_fdtol = fdtol;
557 
558 	/*
559 	 * p_limit is copy-on-write.  Bump its refcount.
560 	 */
561 	lim_fork(p1, p2);
562 
563 	pstats_fork(p1->p_stats, p2->p_stats);
564 
565 	PROC_UNLOCK(p1);
566 	PROC_UNLOCK(p2);
567 
568 	/* Bump references to the text vnode (for procfs) */
569 	if (p2->p_textvp)
570 		vref(p2->p_textvp);
571 
572 	/*
573 	 * Set up linkage for kernel based threading.
574 	 */
575 	if ((flags & RFTHREAD) != 0) {
576 		mtx_lock(&ppeers_lock);
577 		p2->p_peers = p1->p_peers;
578 		p1->p_peers = p2;
579 		p2->p_leader = p1->p_leader;
580 		mtx_unlock(&ppeers_lock);
581 		PROC_LOCK(p1->p_leader);
582 		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
583 			PROC_UNLOCK(p1->p_leader);
584 			/*
585 			 * The task leader is exiting, so process p1 is
586 			 * going to be killed shortly.  Since p1 obviously
587 			 * isn't dead yet, we know that the leader is either
588 			 * sending SIGKILL's to all the processes in this
589 			 * task or is sleeping waiting for all the peers to
590 			 * exit.  We let p1 complete the fork, but we need
591 			 * to go ahead and kill the new process p2 since
592 			 * the task leader may not get a chance to send
593 			 * SIGKILL to it.  We leave it on the list so that
594 			 * the task leader will wait for this new process
595 			 * to commit suicide.
596 			 */
597 			PROC_LOCK(p2);
598 			psignal(p2, SIGKILL);
599 			PROC_UNLOCK(p2);
600 		} else
601 			PROC_UNLOCK(p1->p_leader);
602 	} else {
603 		p2->p_peers = NULL;
604 		p2->p_leader = p2;
605 	}
606 
607 	sx_xlock(&proctree_lock);
608 	PGRP_LOCK(p1->p_pgrp);
609 	PROC_LOCK(p2);
610 	PROC_LOCK(p1);
611 
612 	/*
613 	 * Preserve some more flags in subprocess.  P_PROFIL has already
614 	 * been preserved.
615 	 */
616 	p2->p_flag |= p1->p_flag & P_SUGID;
617 	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
618 	SESS_LOCK(p1->p_session);
619 	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
620 		p2->p_flag |= P_CONTROLT;
621 	SESS_UNLOCK(p1->p_session);
622 	if (flags & RFPPWAIT)
623 		p2->p_flag |= P_PPWAIT;
624 
625 	p2->p_pgrp = p1->p_pgrp;
626 	LIST_INSERT_AFTER(p1, p2, p_pglist);
627 	PGRP_UNLOCK(p1->p_pgrp);
628 	LIST_INIT(&p2->p_children);
629 
630 	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);
631 
632 #ifdef KTRACE
633 	/*
634 	 * Copy traceflag and tracefile if enabled.
635 	 */
636 	mtx_lock(&ktrace_mtx);
637 	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
638 	if (p1->p_traceflag & KTRFAC_INHERIT) {
639 		p2->p_traceflag = p1->p_traceflag;
640 		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
641 			VREF(p2->p_tracevp);
642 			KASSERT(p1->p_tracecred != NULL,
643 			    ("ktrace vnode with no cred"));
644 			p2->p_tracecred = crhold(p1->p_tracecred);
645 		}
646 	}
647 	mtx_unlock(&ktrace_mtx);
648 #endif
649 
650 	/*
651 	 * If PF_FORK is set, the child process inherits the
652 	 * procfs ioctl flags from its parent.
653 	 */
654 	if (p1->p_pfsflags & PF_FORK) {
655 		p2->p_stops = p1->p_stops;
656 		p2->p_pfsflags = p1->p_pfsflags;
657 	}
658 
659 #ifdef KDTRACE_HOOKS
660 	/*
661 	 * Tell the DTrace fasttrap provider about the new process
662 	 * if it has registered an interest.
663 	 */
664 	if (dtrace_fasttrap_fork)
665 		dtrace_fasttrap_fork(p1, p2);
666 #endif
667 
668 	/*
669 	 * This begins the section where we must prevent the parent
670 	 * from being swapped.
671 	 */
672 	_PHOLD(p1);
673 	PROC_UNLOCK(p1);
674 
675 	/*
676 	 * Attach the new process to its parent.
677 	 *
678 	 * If RFNOWAIT is set, the newly created process becomes a child
679 	 * of init.  This effectively disassociates the child from the
680 	 * parent.
681 	 */
682 	if (flags & RFNOWAIT)
683 		pptr = initproc;
684 	else
685 		pptr = p1;
686 	p2->p_pptr = pptr;
687 	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
688 	sx_xunlock(&proctree_lock);
689 
690 	/* Inform accounting that we have forked. */
691 	p2->p_acflag = AFORK;
692 	PROC_UNLOCK(p2);
693 
694 	/*
695 	 * Finish creating the child process.  It will return via a different
696 	 * execution path later (i.e., directly into user mode).
697 	 */
698 	vm_forkproc(td, p2, td2, vm2, flags);
699 
700 	if (flags == (RFFDG | RFPROC)) {
701 		PCPU_INC(cnt.v_forks);
702 		PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
703 		    p2->p_vmspace->vm_ssize);
704 	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
705 		PCPU_INC(cnt.v_vforks);
706 		PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
707 		    p2->p_vmspace->vm_ssize);
708 	} else if (p1 == &proc0) {
709 		PCPU_INC(cnt.v_kthreads);
710 		PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
711 		    p2->p_vmspace->vm_ssize);
712 	} else {
713 		PCPU_INC(cnt.v_rforks);
714 		PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
715 		    p2->p_vmspace->vm_ssize);
716 	}
717 
718 	/*
719 	 * Both processes are now set up; check whether any loadable modules want
720 	 * to adjust anything.
721 	 *   What if they have an error? XXX
722 	 */
723 	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);
724 
725 	/*
726 	 * Set the child start time and mark the process as being complete.
727 	 */
728 	microuptime(&p2->p_stats->p_start);
729 	PROC_SLOCK(p2);
730 	p2->p_state = PRS_NORMAL;
731 	PROC_SUNLOCK(p2);
732 
733 	/*
734 	 * If RFSTOPPED not requested, make child runnable and add to
735 	 * run queue.
736 	 */
737 	if ((flags & RFSTOPPED) == 0) {
738 		thread_lock(td2);
739 		TD_SET_CAN_RUN(td2);
740 		sched_add(td2, SRQ_BORING);
741 		thread_unlock(td2);
742 	}
743 
744 	/*
745 	 * Now can be swapped.
746 	 */
747 	PROC_LOCK(p1);
748 	_PRELE(p1);
749 	PROC_UNLOCK(p1);
750 
751 	/*
752 	 * Tell any interested parties about the new process.
753 	 */
754 	knote_fork(&p1->p_klist, p2->p_pid);
755 	SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);
756 
757 	/*
758 	 * Preserve synchronization semantics of vfork.  If waiting for
759 	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
760 	 * proc (in case of exit).
761 	 */
762 	PROC_LOCK(p2);
763 	while (p2->p_flag & P_PPWAIT)
764 		cv_wait(&p2->p_pwait, &p2->p_mtx);
765 	PROC_UNLOCK(p2);
766 
767 	/*
768 	 * Return child proc pointer to parent.
769 	 */
770 	*procp = p2;
771 	return (0);
772 fail:
773 	sx_sunlock(&proctree_lock);
774 	if (ppsratecheck(&lastfail, &curfail, 1))
775 		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
776 		    td->td_ucred->cr_ruid);
777 	sx_xunlock(&allproc_lock);
778 #ifdef MAC
779 	mac_proc_destroy(newproc);
780 #endif
781 fail1:
782 	if (vm2 != NULL)
783 		vmspace_free(vm2);
784 	uma_zfree(proc_zone, newproc);
785 	pause("fork", hz / 2);
786 	return (error);
787 }
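
/*
 * A sketch of in-kernel use (flag details are the caller's choice): the
 * kernel-process code creates its workers through this function with
 * something like
 *
 *	error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED, pages, &p2);
 *
 * where RFSTOPPED keeps the new process off the run queue until the caller
 * has finished setting it up (name, priority, entry point) and makes it
 * runnable itself.
 */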
788 
789 /*
790  * Handle the return of a child process from fork1().  This function
791  * is called from the MD fork_trampoline() entry point.
792  */
793 void
794 fork_exit(callout, arg, frame)
795 	void (*callout)(void *, struct trapframe *);
796 	void *arg;
797 	struct trapframe *frame;
798 {
799 	struct proc *p;
800 	struct thread *td;
801 	struct thread *dtd;
802 
803 	td = curthread;
804 	p = td->td_proc;
805 	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
806 
807 	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
808 		td, td->td_sched, p->p_pid, td->td_name);
809 
810 	sched_fork_exit(td);
811 	/*
812 	 * Processes normally resume in mi_switch() after being
813 	 * cpu_switch()'ed to, but when children start up they arrive here
814 	 * instead, so we must do much the same things as mi_switch() would.
815 	 */
816 	if ((dtd = PCPU_GET(deadthread))) {
817 		PCPU_SET(deadthread, NULL);
818 		thread_stash(dtd);
819 	}
820 	thread_unlock(td);
821 
822 	/*
823 	 * cpu_set_fork_handler() can intercept this call so that it invokes
824 	 * a non-returning function, keeping the thread in kernel mode.
825 	 * initproc has its own fork handler, but it does return.
826 	 */
827 	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
828 	callout(arg, frame);
829 
830 	/*
831 	 * Check if a kernel thread misbehaved and returned from its main
832 	 * function.
833 	 */
834 	if (p->p_flag & P_KTHREAD) {
835 		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
836 		    td->td_name, p->p_pid);
837 		kproc_exit(0);
838 	}
839 	mtx_assert(&Giant, MA_NOTOWNED);
840 
841 	EVENTHANDLER_INVOKE(schedtail, p);
842 }
843 
844 /*
845  * Simplified back end of syscall(), used when returning from fork()
846  * directly into user mode.  Giant is not held on entry, and must not
847  * be held on return.  This function is passed in to fork_exit() as the
848  * first parameter and is called when returning to a new userland process.
849  */
850 void
851 fork_return(td, frame)
852 	struct thread *td;
853 	struct trapframe *frame;
854 {
855 
856 	userret(td, frame);
857 #ifdef KTRACE
858 	if (KTRPOINT(td, KTR_SYSRET))
859 		ktrsysret(SYS_fork, 0, 0);
860 #endif
861 	mtx_assert(&Giant, MA_NOTOWNED);
862 }
863