/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/syscall.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/kthread.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/vmmeter.h>
#include <sys/user.h>
#include <machine/critical.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

static struct sx fork_list_lock;

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

int	forksleep;	/* Place for fork1() to sleep on. */

static void
init_fork_list(void *data __unused)
{

	sx_init(&fork_list_lock, "fork list");
}
SYSINIT(fork_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_fork_list, NULL);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	mtx_lock(&Giant);
	error = fork1(td, RFFDG | RFPROC, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	mtx_lock(&Giant);
	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	int error;
	struct proc *p2;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);
	mtx_lock(&Giant);
	error = fork1(td, uap->flags, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}


int	nprocs = 1;		/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus
 * that is too big causes a LOT more process table scans and slows down
 * fork processing as the pidchecked caching is defeated.
 */
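
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * with the modulus enabled, e.g. via "sysctl kern.randompid=100", the PID
 * search in fork1() below starts from roughly
 *
 *	trypid = lastpid + 1 + (arc4random() % randompid);
 *
 * instead of lastpid + 1, so consecutive forks no longer hand out strictly
 * sequential PIDs.
 */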
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

#if 0
void
kse_init(struct kse *kse1, struct kse *kse2)
{
}

void
thread_init(struct thread *thread1, struct thread *thread2)
{
}

void
ksegrp_init(struct ksegrp *ksegrp1, struct ksegrp *ksegrp2)
{
}
#endif

int
fork1(td, flags, procp)
	struct thread *td;		/* parent proc */
	int flags;
	struct proc **procp;		/* child proc */
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int trypid;
	int ok;
	static int pidchecked = 0;
	struct forklist *ep;
	struct filedesc *fd;
	struct proc *p1 = td->td_proc;
	struct thread *td2;
	struct kse *ke2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	struct procsig *newprocsig;

	GIANT_REQUIRED;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td);	/* XXXKSE */
			PROC_LOCK(p1);
			fdfree(td);		/* XXXKSE */
			p1->p_fd = fdtmp;
			PROC_UNLOCK(p1);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			FILEDESC_LOCK(p1->p_fd);
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				newfd = fdcopy(td);
				FILEDESC_UNLOCK(p1->p_fd);
				PROC_LOCK(p1);
				fdfree(td);
				p1->p_fd = newfd;
				PROC_UNLOCK(p1);
			} else
				FILEDESC_UNLOCK(p1->p_fd);
		}
		*procp = NULL;
		return (0);
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	uid = td->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		sx_xunlock(&allproc_lock);
		uma_zfree(proc_zone, newproc);
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}
	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	PROC_LOCK(p1);
	ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	PROC_UNLOCK(p1);
	if (!ok) {
		sx_xunlock(&allproc_lock);
		uma_zfree(proc_zone, newproc);
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}
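
	/*
	 * Worked example (added commentary, not in the original source):
	 * with maxproc = 1000, the maxproc test above starts failing
	 * unprivileged forks once nprocs reaches 990, reserving the last
	 * ten slots for root; root itself is refused only once nprocs
	 * reaches maxproc.
	 */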

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10) {
			trypid = 10;
		}
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    p2->p_pgrp->pg_id == trypid ||
			    p2->p_session->s_sid == trypid) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > trypid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > trypid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_stat = SIDL;		/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE) {
		MALLOC(newsigacts, struct sigacts *,
		    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
		newprocsig = NULL;
	} else {
		newsigacts = NULL;
		MALLOC(newprocsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
	}

	/*
	 * Copy filedesc.
	 * XXX: This is busted.  fd*() need to not take proc
	 * arguments or something.
	 */
	if (flags & RFCFDG)
		fd = fdinit(td);
	else if (flags & RFFDG) {
		FILEDESC_LOCK(p1->p_fd);
		fd = fdcopy(td);
		FILEDESC_UNLOCK(p1->p_fd);
	} else
		fd = fdshare(p1);
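
	/*
	 * Illustrative summary (added commentary, not in the original
	 * source) of the three filedesc cases above: RFCFDG gives the
	 * child a fresh, empty descriptor table; RFFDG gives it a private
	 * copy of the parent's table; with neither flag set, parent and
	 * child share a single table, a behavior rfork() callers can
	 * request explicitly.
	 */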

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = thread_get(p2);
	ke2 = &p2->p_kse;
	kg2 = &p2->p_ksegrp;

/* Byte length of the member run from 'start' up to (not including) 'end'. */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	bzero(&p2->p_startzero,
	    (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
	bzero(&ke2->ke_startzero,
	    (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
	bzero(&td2->td_startzero,
	    (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
	bzero(&kg2->kg_startzero,
	    (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));

	mtx_init(&p2->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_kse->ke_startcopy, &ke2->ke_startcopy,
	    (unsigned) RANGEOF(struct kse, ke_startcopy, ke_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
#undef RANGEOF

	/*
	 * XXXKSE Theoretically only the running thread would get copied;
	 * others in the kernel would be 'aborted' in the child,
	 * i.e. return E*something*.
	 */
	proc_linkup(p2, kg2, ke2, td2);

	/* note.. XXXKSE no pcb or u-area yet */

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_forkproc.
	 */
	p2->p_flag = 0;
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	if (p1->p_sflag & PS_PROFIL)
		startprofclock(p2);
	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		/*
		 * XXX: This assumes a leader is a parent or grandparent of
		 * all processes in a task.
		 */
		if (p1->p_leader != p1)
			PROC_LOCK(p1->p_leader);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		if (p1->p_leader != p1)
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}
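
	/*
	 * Illustrative sketch (added commentary, not in the original
	 * source): after a leader L does two RFTHREAD forks creating c1
	 * and then c2, the peer list runs L->p_peers == c2,
	 * c2->p_peers == c1, c1->p_peers == NULL, with every member's
	 * p_leader pointing back at L.
	 */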

	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_uarea->u_sigacts) {
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			newsigacts = NULL;
			*p2->p_sigacts = p1->p_uarea->u_sigacts;
		}
	} else {
		p2->p_procsig = newprocsig;
		newprocsig = NULL;
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_forkproc() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);
	p2->p_fd = fd;
	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/*
	 * If p_limit is still copy-on-write, bump refcnt;
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in the subprocess.  PS_PROFIL has
	 * already been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&td2->td_contested);	/* XXXKSE only 1 thread? */

	callout_init(&p2->p_itcallout, 0);
	callout_init(&td2->td_slpcallout, 1);	/* XXXKSE */

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracep == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * Set the priority of the child to be that of the parent.
	 * XXXKSE hey! copying the estcpu seems dodgy.. should split it..
	 */
	mtx_lock_spin(&sched_lock);
	p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu;
	mtx_unlock_spin(&sched_lock);

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	PROC_UNLOCK(p2);
	sx_xunlock(&proctree_lock);

	/*
	 * XXXKSE: In KSE, there would be a race here if one thread was
	 * dying due to a signal (or calling exit1() for that matter) while
	 * another thread was calling fork1().  Not sure how KSE wants to
	 * work around that.  The problem is that up until the point above,
	 * if p1 gets killed, it won't find p2 in its list in order for it
	 * to be reparented.  Alternatively, we could add a new p_flag that
	 * gets set before we reparent all the children that we check above,
	 * and just use init as our parent if that flag is set.  (Either
	 * that, or abort the fork if the flag is set, since our parent died
	 * trying to fork us, which is evil.)
	 */

	KASSERT(newprocsig == NULL, ("unused newprocsig"));
	if (newsigacts != NULL)
		FREE(newsigacts, M_SUBPROC);
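
	/*
	 * Note (added commentary): newsigacts and newprocsig were
	 * preallocated earlier, while no locks were held; whichever one
	 * the RFSIGSHARE branch above did not consume is released here.
	 */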

	/*
	 * Finish creating the child process.  It will return via a
	 * different execution path later (i.e., directly into user mode).
	 */
	vm_forkproc(td, p2, td2, flags);

	if (flags == (RFFDG | RFPROC)) {
		cnt.v_forks++;
		cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		cnt.v_vforks++;
		cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		cnt.v_kthreads++;
		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		cnt.v_rforks++;
		cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up; now check if any loadable modules
	 * want to adjust anything.
	 *   What if they have an error? XXX
	 */
	sx_slock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
	sx_sunlock(&fork_list_lock);

	/*
	 * If RFSTOPPED was not requested, make the child runnable and
	 * add it to the run queue.
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;
	if ((flags & RFSTOPPED) == 0) {
		mtx_lock_spin(&sched_lock);
		p2->p_stat = SRUN;
		setrunqueue(td2);
		mtx_unlock_spin(&sched_lock);
	}

	/*
	 * Now can be swapped.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
	PROC_UNLOCK(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */

int
at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* Let the programmer know if he's been stupid. */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	sx_xlock(&fork_list_lock);
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	sx_xunlock(&fork_list_lock);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */

int
rm_at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

	sx_xlock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			sx_xunlock(&fork_list_lock);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	sx_xunlock(&fork_list_lock);
	return (0);
}
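
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * source): a loadable module could hook fork by registering a callback at
 * load time and removing it at unload.  The callback name below is made up
 * for the example; the signature matches forklist_fn as invoked from
 * fork1() above.
 *
 *	static void
 *	example_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		// inspect or adjust the parent/child pair here
 *	}
 *
 *	at_fork(example_fork_hook);	// at module load
 *	...
 *	rm_at_fork(example_fork_hook);	// at module unload
 */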

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	/*
	 * Finish setting up thread glue.  We need to initialize
	 * the thread into a td_critnest=1 state.  Some platforms
	 * may have already partially or fully initialized td_critnest
	 * and/or td_md.md_savecrit (when applicable).
	 *
	 * See <arch>/<arch>/critical.c.
	 */
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_lock.mtx_recurse = 0;
	cpu_critical_fork_exit();
	CTR3(KTR_PROC, "fork_exit: new proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KTHREAD) {
		PROC_UNLOCK(p);
		mtx_lock(&Giant);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	PROC_UNLOCK(p);
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}
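
/*
 * Added note (sketch, not part of the original source): for an ordinary
 * fork, the MD cpu_fork()/fork_trampoline() code arranges for the child's
 * first execution to be, in effect,
 *
 *	fork_exit(fork_return, td, frame);
 *
 * unless cpu_set_fork_handler() substituted a different callout, as is
 * done for kernel threads.
 */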