/*-
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_fork_func_t	dtrace_fasttrap_fork;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE(proc, kernel, , create);
SDT_PROBE_ARGTYPE(proc, kernel, , create, 0, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, , create, 1, "struct proc *");
SDT_PROBE_ARGTYPE(proc, kernel, , create, 2, "int");

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error, flags;
	struct proc *p2;

#ifdef XEN
	flags = RFFDG | RFPROC; /* validate that this is still an issue */
#else
	flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
#endif
	error = fork1(td, flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG(fflags, uap->flags);
	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}

int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");
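
/*
 * PID selection is tuned by two sysctls: kern.lastpid (above) reports
 * where the allocator will resume searching, and kern.randompid
 * (below) adds a random offset to that starting point on each fork.
 */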

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

int
fork1(td, flags, pages, procp)
	struct thread *td;
	int flags;
	int pages;
	struct proc **procp;
{
	struct proc *p1, *p2, *pptr;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct sigacts *newsigacts;
	struct vmspace *vm2;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			if (thread_single(SINGLE_BOUNDARY)) {
				PROC_UNLOCK(p1);
				return (ERESTART);
			}
			PROC_UNLOCK(p1);
		}

		error = vm_forkproc(td, NULL, NULL, NULL, flags);
		if (error)
			goto norfproc_fail;

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);

norfproc_fail:
		if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		*procp = NULL;
		return (error);
	}

	/*
	 * XXX
	 * We did have single-threading code here, but it proved unneeded
	 * and caused problems.
	 */

	vm2 = NULL;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	if (TAILQ_EMPTY(&newproc->p_threads)) {
		td2 = thread_alloc();
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail1;
		}
		proc_linkup(newproc, td2);
	} else
		td2 = FIRST_THREAD_IN_PROC(newproc);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0) {
		if (!vm_thread_new_altkstack(td2, pages)) {
			error = ENOMEM;
			goto fail1;
		}
	}
	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail1;
		}
	}
#ifdef MAC
	mac_proc_init(newproc);
#endif
	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
	STAILQ_INIT(&newproc->p_ktr);
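
	/*
	 * The new proc and its first thread now exist but are not yet
	 * visible to the rest of the system.  Failures past this point
	 * unwind through the fail label below, which releases the
	 * proctree and allproc locks before the proc is freed.
	 */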

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	if ((nprocs >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		PROC_LOCK(p1);
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(p1, RLIMIT_NPROC));
		PROC_UNLOCK(p1);
	}
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits on the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
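	/*
	 * p_state is still PRS_NEW, so code that sweeps allproc can
	 * detect the half-constructed child and skip it until it is
	 * marked PRS_NORMAL below.
	 */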
	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);
	AUDIT_ARG(pid, p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);
	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	p2->p_ucred = crhold(td->td_ucred);
	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_sigmask = td->td_sigmask;
	td2->td_flags = TDF_INMEM;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	td2->td_ucred = crhold(p2->p_ucred);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}
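
	/*
	 * Enter the child into the parent's process group and session
	 * under the proctree and pgrp locks, and propagate the flags
	 * that are inherited across fork.
	 */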
	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process
	 * if it has registered an interest.
	 */
	if (dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (i.e., directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, flags);
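
	/*
	 * Update the per-cpu fork statistics.  The canonical fork() and
	 * vfork() flag combinations are counted separately from kernel
	 * threads (forked from proc0) and other rfork() variants.
	 */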
	if (flags == (RFFDG | RFPROC)) {
		PCPU_INC(cnt.v_forks);
		PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		PCPU_INC(cnt.v_vforks);
		PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		PCPU_INC(cnt.v_kthreads);
		PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		PCPU_INC(cnt.v_rforks);
		PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
	}

	/*
	 * Now can be swapped.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(&p1->p_klist, p2->p_pid);
	SDT_PROBE(proc, kernel, , create, p2, p1, flags, 0, 0);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
		    td->td_ucred->cr_ruid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
fail1:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	pause("fork", hz / 2);
	return (error);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td->td_sched, p->p_pid, td->td_name);

	sched_fork_exit(td);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
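	/*
	 * A thread that exited on this cpu could not free itself while
	 * it was still running; if it parked itself in the per-cpu
	 * deadthread slot, stash it now so it can be reaped.
	 */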
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KTHREAD) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kproc_exit(0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	EVENTHANDLER_INVOKE(schedtail, p);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}