1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)kern_fork.c 8.6 (Berkeley) 4/8/94 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 #include "opt_ktrace.h" 45 #include "opt_mac.h" 46 47 #include <sys/param.h> 48 #include <sys/systm.h> 49 #include <sys/sysproto.h> 50 #include <sys/eventhandler.h> 51 #include <sys/filedesc.h> 52 #include <sys/kernel.h> 53 #include <sys/kthread.h> 54 #include <sys/sysctl.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mutex.h> 58 #include <sys/proc.h> 59 #include <sys/pioctl.h> 60 #include <sys/resourcevar.h> 61 #include <sys/sched.h> 62 #include <sys/syscall.h> 63 #include <sys/vmmeter.h> 64 #include <sys/vnode.h> 65 #include <sys/acct.h> 66 #include <sys/mac.h> 67 #include <sys/ktr.h> 68 #include <sys/ktrace.h> 69 #include <sys/unistd.h> 70 #include <sys/sx.h> 71 72 #include <vm/vm.h> 73 #include <vm/pmap.h> 74 #include <vm/vm_map.h> 75 #include <vm/vm_extern.h> 76 #include <vm/uma.h> 77 78 #include <sys/user.h> 79 #include <machine/critical.h> 80 81 #ifndef _SYS_SYSPROTO_H_ 82 struct fork_args { 83 int dummy; 84 }; 85 #endif 86 87 static int forksleep; /* Place for fork1() to sleep on. */ 88 89 /* 90 * MPSAFE 91 */ 92 /* ARGSUSED */ 93 int 94 fork(td, uap) 95 struct thread *td; 96 struct fork_args *uap; 97 { 98 int error; 99 struct proc *p2; 100 101 error = fork1(td, RFFDG | RFPROC, 0, &p2); 102 if (error == 0) { 103 td->td_retval[0] = p2->p_pid; 104 td->td_retval[1] = 0; 105 } 106 return (error); 107 } 108 109 /* 110 * MPSAFE 111 */ 112 /* ARGSUSED */ 113 int 114 vfork(td, uap) 115 struct thread *td; 116 struct vfork_args *uap; 117 { 118 int error; 119 struct proc *p2; 120 121 error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2); 122 if (error == 0) { 123 td->td_retval[0] = p2->p_pid; 124 td->td_retval[1] = 0; 125 } 126 return (error); 127 } 128 129 /* 130 * MPSAFE 131 */ 132 int 133 rfork(td, uap) 134 struct thread *td; 135 struct rfork_args *uap; 136 { 137 struct proc *p2; 138 int error; 139 140 /* Don't allow kernel-only flags. */ 141 if ((uap->flags & RFKERNELONLY) != 0) 142 return (EINVAL); 143 144 error = fork1(td, uap->flags, 0, &p2); 145 if (error == 0) { 146 td->td_retval[0] = p2 ? p2->p_pid : 0; 147 td->td_retval[1] = 0; 148 } 149 return (error); 150 } 151 152 int nprocs = 1; /* process 0 */ 153 int lastpid = 0; 154 SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, 155 "Last used PID"); 156 157 /* 158 * Random component to lastpid generation. We mix in a random factor to make 159 * it a little harder to predict. We sanity check the modulus value to avoid 160 * doing it in critical paths. Don't let it be too small or we pointlessly 161 * waste randomness entropy, and don't let it be impossibly large. Using a 162 * modulus that is too big causes a LOT more process table scans and slows 163 * down fork processing as the pidchecked caching is defeated. 164 */ 165 static int randompid = 0; 166 167 static int 168 sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) 169 { 170 int error, pid; 171 172 error = sysctl_wire_old_buffer(req, sizeof(int)); 173 if (error != 0) 174 return(error); 175 sx_xlock(&allproc_lock); 176 pid = randompid; 177 error = sysctl_handle_int(oidp, &pid, 0, req); 178 if (error == 0 && req->newptr != NULL) { 179 if (pid < 0 || pid > PID_MAX - 100) /* out of range */ 180 pid = PID_MAX - 100; 181 else if (pid < 2) /* NOP */ 182 pid = 0; 183 else if (pid < 100) /* Make it reasonable */ 184 pid = 100; 185 randompid = pid; 186 } 187 sx_xunlock(&allproc_lock); 188 return (error); 189 } 190 191 SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, 192 0, 0, sysctl_kern_randompid, "I", "Random PID modulus"); 193 194 int 195 fork1(td, flags, pages, procp) 196 struct thread *td; 197 int flags; 198 int pages; 199 struct proc **procp; 200 { 201 struct proc *p1, *p2, *pptr; 202 uid_t uid; 203 struct proc *newproc; 204 int ok, trypid; 205 static int curfail, pidchecked = 0; 206 static struct timeval lastfail; 207 struct filedesc *fd; 208 struct filedesc_to_leader *fdtol; 209 struct thread *td2; 210 struct kse *ke2; 211 struct ksegrp *kg2; 212 struct sigacts *newsigacts; 213 int error; 214 215 /* Can't copy and clear. */ 216 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) 217 return (EINVAL); 218 219 p1 = td->td_proc; 220 221 /* 222 * Here we don't create a new process, but we divorce 223 * certain parts of a process from itself. 224 */ 225 if ((flags & RFPROC) == 0) { 226 mtx_lock(&Giant); 227 vm_forkproc(td, NULL, NULL, flags); 228 mtx_unlock(&Giant); 229 230 /* 231 * Close all file descriptors. 232 */ 233 if (flags & RFCFDG) { 234 struct filedesc *fdtmp; 235 fdtmp = fdinit(td->td_proc->p_fd); 236 fdfree(td); 237 p1->p_fd = fdtmp; 238 } 239 240 /* 241 * Unshare file descriptors (from parent). 242 */ 243 if (flags & RFFDG) { 244 FILEDESC_LOCK(p1->p_fd); 245 if (p1->p_fd->fd_refcnt > 1) { 246 struct filedesc *newfd; 247 248 newfd = fdcopy(td->td_proc->p_fd); 249 FILEDESC_UNLOCK(p1->p_fd); 250 fdfree(td); 251 p1->p_fd = newfd; 252 } else 253 FILEDESC_UNLOCK(p1->p_fd); 254 } 255 *procp = NULL; 256 return (0); 257 } 258 259 /* 260 * Note 1:1 allows for forking with one thread coming out on the 261 * other side with the expectation that the process is about to 262 * exec. 263 */ 264 if (p1->p_flag & P_SA) { 265 /* 266 * Idle the other threads for a second. 267 * Since the user space is copied, it must remain stable. 268 * In addition, all threads (from the user perspective) 269 * need to either be suspended or in the kernel, 270 * where they will try restart in the parent and will 271 * be aborted in the child. 272 */ 273 mtx_lock(&Giant); 274 PROC_LOCK(p1); 275 if (thread_single(SINGLE_NO_EXIT)) { 276 /* Abort. Someone else is single threading before us. */ 277 PROC_UNLOCK(p1); 278 mtx_unlock(&Giant); 279 return (ERESTART); 280 } 281 PROC_UNLOCK(p1); 282 mtx_unlock(&Giant); 283 /* 284 * All other activity in this process 285 * is now suspended at the user boundary, 286 * (or other safe places if we think of any). 287 */ 288 } 289 290 /* Allocate new proc. */ 291 newproc = uma_zalloc(proc_zone, M_WAITOK); 292 #ifdef MAC 293 mac_init_proc(newproc); 294 #endif 295 296 /* We have to lock the process tree while we look for a pid. */ 297 sx_slock(&proctree_lock); 298 299 /* 300 * Although process entries are dynamically created, we still keep 301 * a global limit on the maximum number we will create. Don't allow 302 * a nonprivileged user to use the last ten processes; don't let root 303 * exceed the limit. The variable nprocs is the current number of 304 * processes, maxproc is the limit. 305 */ 306 sx_xlock(&allproc_lock); 307 uid = td->td_ucred->cr_ruid; 308 if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) { 309 error = EAGAIN; 310 goto fail; 311 } 312 313 /* 314 * Increment the count of procs running with this uid. Don't allow 315 * a nonprivileged user to exceed their current limit. 316 */ 317 PROC_LOCK(p1); 318 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 319 (uid != 0) ? lim_cur(p1, RLIMIT_NPROC) : 0); 320 PROC_UNLOCK(p1); 321 if (!ok) { 322 error = EAGAIN; 323 goto fail; 324 } 325 326 /* 327 * Increment the nprocs resource before blocking can occur. There 328 * are hard-limits as to the number of processes that can run. 329 */ 330 nprocs++; 331 332 /* 333 * Find an unused process ID. We remember a range of unused IDs 334 * ready to use (from lastpid+1 through pidchecked-1). 335 * 336 * If RFHIGHPID is set (used during system boot), do not allocate 337 * low-numbered pids. 338 */ 339 trypid = lastpid + 1; 340 if (flags & RFHIGHPID) { 341 if (trypid < 10) 342 trypid = 10; 343 } else { 344 if (randompid) 345 trypid += arc4random() % randompid; 346 } 347 retry: 348 /* 349 * If the process ID prototype has wrapped around, 350 * restart somewhat above 0, as the low-numbered procs 351 * tend to include daemons that don't exit. 352 */ 353 if (trypid >= PID_MAX) { 354 trypid = trypid % PID_MAX; 355 if (trypid < 100) 356 trypid += 100; 357 pidchecked = 0; 358 } 359 if (trypid >= pidchecked) { 360 int doingzomb = 0; 361 362 pidchecked = PID_MAX; 363 /* 364 * Scan the active and zombie procs to check whether this pid 365 * is in use. Remember the lowest pid that's greater 366 * than trypid, so we can avoid checking for a while. 367 */ 368 p2 = LIST_FIRST(&allproc); 369 again: 370 for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) { 371 PROC_LOCK(p2); 372 while (p2->p_pid == trypid || 373 (p2->p_pgrp != NULL && 374 (p2->p_pgrp->pg_id == trypid || 375 (p2->p_session != NULL && 376 p2->p_session->s_sid == trypid)))) { 377 trypid++; 378 if (trypid >= pidchecked) { 379 PROC_UNLOCK(p2); 380 goto retry; 381 } 382 } 383 if (p2->p_pid > trypid && pidchecked > p2->p_pid) 384 pidchecked = p2->p_pid; 385 if (p2->p_pgrp != NULL) { 386 if (p2->p_pgrp->pg_id > trypid && 387 pidchecked > p2->p_pgrp->pg_id) 388 pidchecked = p2->p_pgrp->pg_id; 389 if (p2->p_session != NULL && 390 p2->p_session->s_sid > trypid && 391 pidchecked > p2->p_session->s_sid) 392 pidchecked = p2->p_session->s_sid; 393 } 394 PROC_UNLOCK(p2); 395 } 396 if (!doingzomb) { 397 doingzomb = 1; 398 p2 = LIST_FIRST(&zombproc); 399 goto again; 400 } 401 } 402 sx_sunlock(&proctree_lock); 403 404 /* 405 * RFHIGHPID does not mess with the lastpid counter during boot. 406 */ 407 if (flags & RFHIGHPID) 408 pidchecked = 0; 409 else 410 lastpid = trypid; 411 412 p2 = newproc; 413 p2->p_state = PRS_NEW; /* protect against others */ 414 p2->p_pid = trypid; 415 LIST_INSERT_HEAD(&allproc, p2, p_list); 416 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); 417 sx_xunlock(&allproc_lock); 418 419 /* 420 * Malloc things while we don't hold any locks. 421 */ 422 if (flags & RFSIGSHARE) 423 newsigacts = NULL; 424 else 425 newsigacts = sigacts_alloc(); 426 427 /* 428 * Copy filedesc. 429 */ 430 if (flags & RFCFDG) { 431 fd = fdinit(td->td_proc->p_fd); 432 fdtol = NULL; 433 } else if (flags & RFFDG) { 434 FILEDESC_LOCK(p1->p_fd); 435 fd = fdcopy(td->td_proc->p_fd); 436 FILEDESC_UNLOCK(p1->p_fd); 437 fdtol = NULL; 438 } else { 439 fd = fdshare(p1->p_fd); 440 if (p1->p_fdtol == NULL) 441 p1->p_fdtol = 442 filedesc_to_leader_alloc(NULL, 443 NULL, 444 p1->p_leader); 445 if ((flags & RFTHREAD) != 0) { 446 /* 447 * Shared file descriptor table and 448 * shared process leaders. 449 */ 450 fdtol = p1->p_fdtol; 451 FILEDESC_LOCK(p1->p_fd); 452 fdtol->fdl_refcount++; 453 FILEDESC_UNLOCK(p1->p_fd); 454 } else { 455 /* 456 * Shared file descriptor table, and 457 * different process leaders 458 */ 459 fdtol = filedesc_to_leader_alloc(p1->p_fdtol, 460 p1->p_fd, 461 p2); 462 } 463 } 464 /* 465 * Make a proc table entry for the new process. 466 * Start by zeroing the section of proc that is zero-initialized, 467 * then copy the section that is copied directly from the parent. 468 */ 469 td2 = FIRST_THREAD_IN_PROC(p2); 470 kg2 = FIRST_KSEGRP_IN_PROC(p2); 471 ke2 = FIRST_KSE_IN_KSEGRP(kg2); 472 473 /* Allocate and switch to an alternate kstack if specified. */ 474 if (pages != 0) 475 vm_thread_new_altkstack(td2, pages); 476 477 mtx_lock(&Giant); /* XXX: for VREF() */ 478 PROC_LOCK(p2); 479 PROC_LOCK(p1); 480 481 #define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) 482 483 bzero(&p2->p_startzero, 484 (unsigned) RANGEOF(struct proc, p_startzero, p_endzero)); 485 bzero(&ke2->ke_startzero, 486 (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero)); 487 bzero(&td2->td_startzero, 488 (unsigned) RANGEOF(struct thread, td_startzero, td_endzero)); 489 bzero(&kg2->kg_startzero, 490 (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero)); 491 492 bcopy(&p1->p_startcopy, &p2->p_startcopy, 493 (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy)); 494 bcopy(&td->td_startcopy, &td2->td_startcopy, 495 (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); 496 bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy, 497 (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy)); 498 #undef RANGEOF 499 500 td2->td_sigstk = td->td_sigstk; 501 502 /* Set up the thread as an active thread (as if runnable). */ 503 ke2->ke_state = KES_THREAD; 504 ke2->ke_thread = td2; 505 td2->td_kse = ke2; 506 507 /* 508 * Duplicate sub-structures as needed. 509 * Increase reference counts on shared objects. 510 * The p_stats substruct is set in vm_forkproc. 511 */ 512 p2->p_flag = 0; 513 if (p1->p_flag & P_PROFIL) 514 startprofclock(p2); 515 mtx_lock_spin(&sched_lock); 516 p2->p_sflag = PS_INMEM; 517 /* 518 * Allow the scheduler to adjust the priority of the child and 519 * parent while we hold the sched_lock. 520 */ 521 sched_fork(p1, p2); 522 523 mtx_unlock_spin(&sched_lock); 524 p2->p_ucred = crhold(td->td_ucred); 525 td2->td_ucred = crhold(p2->p_ucred); /* XXXKSE */ 526 527 pargs_hold(p2->p_args); 528 529 if (flags & RFSIGSHARE) { 530 p2->p_sigacts = sigacts_hold(p1->p_sigacts); 531 } else { 532 sigacts_copy(newsigacts, p1->p_sigacts); 533 p2->p_sigacts = newsigacts; 534 } 535 if (flags & RFLINUXTHPN) 536 p2->p_sigparent = SIGUSR1; 537 else 538 p2->p_sigparent = SIGCHLD; 539 540 /* Bump references to the text vnode (for procfs) */ 541 p2->p_textvp = p1->p_textvp; 542 if (p2->p_textvp) 543 VREF(p2->p_textvp); 544 mtx_unlock(&Giant); /* XXX: for VREF() */ 545 p2->p_fd = fd; 546 p2->p_fdtol = fdtol; 547 548 /* 549 * p_limit is copy-on-write. Bump its refcount. 550 */ 551 p2->p_limit = lim_hold(p1->p_limit); 552 PROC_UNLOCK(p1); 553 PROC_UNLOCK(p2); 554 555 /* 556 * Set up linkage for kernel based threading. 557 */ 558 if ((flags & RFTHREAD) != 0) { 559 mtx_lock(&ppeers_lock); 560 p2->p_peers = p1->p_peers; 561 p1->p_peers = p2; 562 p2->p_leader = p1->p_leader; 563 mtx_unlock(&ppeers_lock); 564 PROC_LOCK(p1->p_leader); 565 if ((p1->p_leader->p_flag & P_WEXIT) != 0) { 566 PROC_UNLOCK(p1->p_leader); 567 /* 568 * The task leader is exiting, so process p1 is 569 * going to be killed shortly. Since p1 obviously 570 * isn't dead yet, we know that the leader is either 571 * sending SIGKILL's to all the processes in this 572 * task or is sleeping waiting for all the peers to 573 * exit. We let p1 complete the fork, but we need 574 * to go ahead and kill the new process p2 since 575 * the task leader may not get a chance to send 576 * SIGKILL to it. We leave it on the list so that 577 * the task leader will wait for this new process 578 * to commit suicide. 579 */ 580 PROC_LOCK(p2); 581 psignal(p2, SIGKILL); 582 PROC_UNLOCK(p2); 583 } else 584 PROC_UNLOCK(p1->p_leader); 585 } else { 586 p2->p_peers = NULL; 587 p2->p_leader = p2; 588 } 589 590 sx_xlock(&proctree_lock); 591 PGRP_LOCK(p1->p_pgrp); 592 PROC_LOCK(p2); 593 PROC_LOCK(p1); 594 595 /* 596 * Preserve some more flags in subprocess. P_PROFIL has already 597 * been preserved. 598 */ 599 p2->p_flag |= p1->p_flag & P_SUGID; 600 td2->td_pflags |= td->td_pflags & TDP_ALTSTACK; 601 SESS_LOCK(p1->p_session); 602 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) 603 p2->p_flag |= P_CONTROLT; 604 SESS_UNLOCK(p1->p_session); 605 if (flags & RFPPWAIT) 606 p2->p_flag |= P_PPWAIT; 607 608 p2->p_pgrp = p1->p_pgrp; 609 LIST_INSERT_AFTER(p1, p2, p_pglist); 610 PGRP_UNLOCK(p1->p_pgrp); 611 LIST_INIT(&p2->p_children); 612 613 callout_init(&p2->p_itcallout, CALLOUT_MPSAFE); 614 615 #ifdef KTRACE 616 /* 617 * Copy traceflag and tracefile if enabled. 618 */ 619 mtx_lock(&ktrace_mtx); 620 KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode")); 621 if (p1->p_traceflag & KTRFAC_INHERIT) { 622 p2->p_traceflag = p1->p_traceflag; 623 if ((p2->p_tracevp = p1->p_tracevp) != NULL) { 624 VREF(p2->p_tracevp); 625 KASSERT(p1->p_tracecred != NULL, 626 ("ktrace vnode with no cred")); 627 p2->p_tracecred = crhold(p1->p_tracecred); 628 } 629 } 630 mtx_unlock(&ktrace_mtx); 631 #endif 632 633 /* 634 * If PF_FORK is set, the child process inherits the 635 * procfs ioctl flags from its parent. 636 */ 637 if (p1->p_pfsflags & PF_FORK) { 638 p2->p_stops = p1->p_stops; 639 p2->p_pfsflags = p1->p_pfsflags; 640 } 641 642 /* 643 * This begins the section where we must prevent the parent 644 * from being swapped. 645 */ 646 _PHOLD(p1); 647 PROC_UNLOCK(p1); 648 649 /* 650 * Attach the new process to its parent. 651 * 652 * If RFNOWAIT is set, the newly created process becomes a child 653 * of init. This effectively disassociates the child from the 654 * parent. 655 */ 656 if (flags & RFNOWAIT) 657 pptr = initproc; 658 else 659 pptr = p1; 660 p2->p_pptr = pptr; 661 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); 662 sx_xunlock(&proctree_lock); 663 664 /* Inform accounting that we have forked. */ 665 p2->p_acflag = AFORK; 666 PROC_UNLOCK(p2); 667 668 /* 669 * Finish creating the child process. It will return via a different 670 * execution path later. (ie: directly into user mode) 671 */ 672 mtx_lock(&Giant); 673 vm_forkproc(td, p2, td2, flags); 674 675 if (flags == (RFFDG | RFPROC)) { 676 cnt.v_forks++; 677 cnt.v_forkpages += p2->p_vmspace->vm_dsize + 678 p2->p_vmspace->vm_ssize; 679 } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { 680 cnt.v_vforks++; 681 cnt.v_vforkpages += p2->p_vmspace->vm_dsize + 682 p2->p_vmspace->vm_ssize; 683 } else if (p1 == &proc0) { 684 cnt.v_kthreads++; 685 cnt.v_kthreadpages += p2->p_vmspace->vm_dsize + 686 p2->p_vmspace->vm_ssize; 687 } else { 688 cnt.v_rforks++; 689 cnt.v_rforkpages += p2->p_vmspace->vm_dsize + 690 p2->p_vmspace->vm_ssize; 691 } 692 693 /* 694 * Both processes are set up, now check if any loadable modules want 695 * to adjust anything. 696 * What if they have an error? XXX 697 */ 698 EVENTHANDLER_INVOKE(process_fork, p1, p2, flags); 699 mtx_unlock(&Giant); 700 701 /* 702 * Set the child start time and mark the process as being complete. 703 */ 704 microuptime(&p2->p_stats->p_start); 705 mtx_lock_spin(&sched_lock); 706 p2->p_state = PRS_NORMAL; 707 708 /* 709 * If RFSTOPPED not requested, make child runnable and add to 710 * run queue. 711 */ 712 if ((flags & RFSTOPPED) == 0) { 713 TD_SET_CAN_RUN(td2); 714 setrunqueue(td2); 715 } 716 mtx_unlock_spin(&sched_lock); 717 718 /* 719 * Now can be swapped. 720 */ 721 PROC_LOCK(p1); 722 _PRELE(p1); 723 724 /* 725 * Tell any interested parties about the new process. 726 */ 727 KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid); 728 729 PROC_UNLOCK(p1); 730 731 /* 732 * Preserve synchronization semantics of vfork. If waiting for 733 * child to exec or exit, set P_PPWAIT on child, and sleep on our 734 * proc (in case of exit). 735 */ 736 PROC_LOCK(p2); 737 while (p2->p_flag & P_PPWAIT) 738 msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0); 739 PROC_UNLOCK(p2); 740 741 /* 742 * If other threads are waiting, let them continue now. 743 */ 744 if (p1->p_flag & P_SA) { 745 PROC_LOCK(p1); 746 thread_single_end(); 747 PROC_UNLOCK(p1); 748 } 749 750 /* 751 * Return child proc pointer to parent. 752 */ 753 *procp = p2; 754 return (0); 755 fail: 756 sx_sunlock(&proctree_lock); 757 if (ppsratecheck(&lastfail, &curfail, 1)) 758 printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n", 759 uid); 760 sx_xunlock(&allproc_lock); 761 #ifdef MAC 762 mac_destroy_proc(newproc); 763 #endif 764 uma_zfree(proc_zone, newproc); 765 if (p1->p_flag & P_SA) { 766 PROC_LOCK(p1); 767 thread_single_end(); 768 PROC_UNLOCK(p1); 769 } 770 tsleep(&forksleep, PUSER, "fork", hz / 2); 771 return (error); 772 } 773 774 /* 775 * Handle the return of a child process from fork1(). This function 776 * is called from the MD fork_trampoline() entry point. 777 */ 778 void 779 fork_exit(callout, arg, frame) 780 void (*callout)(void *, struct trapframe *); 781 void *arg; 782 struct trapframe *frame; 783 { 784 struct proc *p; 785 struct thread *td; 786 787 /* 788 * Processes normally resume in mi_switch() after being 789 * cpu_switch()'ed to, but when children start up they arrive here 790 * instead, so we must do much the same things as mi_switch() would. 791 */ 792 793 if ((td = PCPU_GET(deadthread))) { 794 PCPU_SET(deadthread, NULL); 795 thread_stash(td); 796 } 797 td = curthread; 798 p = td->td_proc; 799 td->td_oncpu = PCPU_GET(cpuid); 800 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); 801 802 /* 803 * Finish setting up thread glue so that it begins execution in a 804 * non-nested critical section with sched_lock held but not recursed. 805 */ 806 sched_lock.mtx_lock = (uintptr_t)td; 807 mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); 808 cpu_critical_fork_exit(); 809 CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid, 810 p->p_comm); 811 mtx_unlock_spin(&sched_lock); 812 813 /* 814 * cpu_set_fork_handler intercepts this function call to 815 * have this call a non-return function to stay in kernel mode. 816 * initproc has its own fork handler, but it does return. 817 */ 818 KASSERT(callout != NULL, ("NULL callout in fork_exit")); 819 callout(arg, frame); 820 821 /* 822 * Check if a kernel thread misbehaved and returned from its main 823 * function. 824 */ 825 PROC_LOCK(p); 826 if (p->p_flag & P_KTHREAD) { 827 PROC_UNLOCK(p); 828 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", 829 p->p_comm, p->p_pid); 830 kthread_exit(0); 831 } 832 PROC_UNLOCK(p); 833 #ifdef DIAGNOSTIC 834 cred_free_thread(td); 835 #endif 836 mtx_assert(&Giant, MA_NOTOWNED); 837 } 838 839 /* 840 * Simplified back end of syscall(), used when returning from fork() 841 * directly into user mode. Giant is not held on entry, and must not 842 * be held on return. This function is passed in to fork_exit() as the 843 * first parameter and is called when returning to a new userland process. 844 */ 845 void 846 fork_return(td, frame) 847 struct thread *td; 848 struct trapframe *frame; 849 { 850 851 userret(td, frame, 0); 852 #ifdef KTRACE 853 if (KTRPOINT(td, KTR_SYSRET)) 854 ktrsysret(SYS_fork, 0, 0); 855 #endif 856 mtx_assert(&Giant, MA_NOTOWNED); 857 } 858