/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/syscall.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/kthread.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/vmmeter.h>
#include <sys/user.h>
#include <machine/critical.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

static struct sx fork_list_lock;

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

int forksleep;			/* Place for fork1() to sleep on. */

static void
init_fork_list(void *data __unused)
{

	sx_init(&fork_list_lock, "fork list");
}
SYSINIT(fork_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_fork_list, NULL);

/*
 * MPSAFE
 */
/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	mtx_lock(&Giant);
	error = fork1(td, RFFDG | RFPROC, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	mtx_lock(&Giant);
	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	int error;
	struct proc *p2;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);
	mtx_lock(&Giant);
	error = fork1(td, uap->flags, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	mtx_unlock(&Giant);
	return (error);
}

int nprocs = 1;		/* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus
 * that is too big causes a LOT more process table scans and slows down
 * fork processing as the pidchecked caching is defeated.
 */
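/*
 * Example: with "sysctl kern.randompid=100", fork1() below starts its
 * PID search at lastpid + 1 + (arc4random() % 100), perturbing each new
 * PID by 0-99 instead of handing them out sequentially.
 */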
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

int
fork1(td, flags, procp)
	struct thread *td;		/* parent proc */
	int flags;
	struct proc **procp;		/* child proc */
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int trypid;
	int ok;
	static int pidchecked = 0;
	struct forklist *ep;
	struct filedesc *fd;
	struct proc *p1 = td->td_proc;
	struct thread *td2;
	struct kse *ke2;
	struct ksegrp *kg2;
	struct sigacts *newsigacts;
	struct procsig *newprocsig;

	GIANT_REQUIRED;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td);	/* XXXKSE */
			PROC_LOCK(p1);
			fdfree(td);		/* XXXKSE */
			p1->p_fd = fdtmp;
			PROC_UNLOCK(p1);
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			FILEDESC_LOCK(p1->p_fd);
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				newfd = fdcopy(td);
				FILEDESC_UNLOCK(p1->p_fd);
				PROC_LOCK(p1);
				fdfree(td);
				p1->p_fd = newfd;
				PROC_UNLOCK(p1);
			} else
				FILEDESC_UNLOCK(p1->p_fd);
		}
		*procp = NULL;
		return (0);
	}

	if (p1->p_flag & P_KSES) {
		/*
		 * Idle the other threads for a second.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to either be suspended or in the kernel,
		 * where they will try to restart in the parent and will
		 * be aborted in the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SNGLE_NO_EXIT)) {
			/* Abort; someone else is single-threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process
		 * is now suspended at the user boundary
		 * (or other safe places if we think of any).
		 */
	}

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
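	/*
	 * Example: with maxproc = 1000, an unprivileged fork starts
	 * failing with EAGAIN once nprocs reaches 990; the last ten
	 * slots are usable only by root.
	 */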
	sx_xlock(&allproc_lock);
	uid = td->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		sx_xunlock(&allproc_lock);
		uma_zfree(proc_zone, newproc);
		if (p1->p_flag & P_KSES) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}
	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	PROC_LOCK(p1);
	ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	PROC_UNLOCK(p1);
	if (!ok) {
		sx_xunlock(&allproc_lock);
		uma_zfree(proc_zone, newproc);
		if (p1->p_flag & P_KSES) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		tsleep(&forksleep, PUSER, "fork", hz / 2);
		return (EAGAIN);
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10) {
			trypid = 10;
		}
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			PROC_LOCK(p2);
			while (p2->p_pid == trypid ||
			    p2->p_pgrp->pg_id == trypid ||
			    p2->p_session->s_sid == trypid) {
				trypid++;
				if (trypid >= pidchecked) {
					PROC_UNLOCK(p2);
					goto retry;
				}
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > trypid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > trypid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
			PROC_UNLOCK(p2);
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);

	/*
	 * Malloc things while we don't hold any locks.
	 */
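	/*
	 * The M_WAITOK allocation below may sleep, which is why it is
	 * done here, before the process locks are acquired; sleeping
	 * while holding a mutex is not permitted.
	 */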
	if (flags & RFSIGSHARE) {
		MALLOC(newsigacts, struct sigacts *,
		    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
		newprocsig = NULL;
	} else {
		newsigacts = NULL;
		MALLOC(newprocsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
	}

	/*
	 * Copy filedesc.
	 * XXX: This is busted.  fd*() need to not take proc
	 * arguments or something.
	 */
	if (flags & RFCFDG)
		fd = fdinit(td);
	else if (flags & RFFDG) {
		FILEDESC_LOCK(p1->p_fd);
		fd = fdcopy(td);
		FILEDESC_UNLOCK(p1->p_fd);
	} else
		fd = fdshare(p1);

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = thread_alloc();
	ke2 = &p2->p_kse;
	kg2 = &p2->p_ksegrp;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	bzero(&p2->p_startzero,
	    (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
	bzero(&ke2->ke_startzero,
	    (unsigned) RANGEOF(struct kse, ke_startzero, ke_endzero));
#if 0 /* bzero'd by the thread allocator */
	bzero(&td2->td_startzero,
	    (unsigned) RANGEOF(struct thread, td_startzero, td_endzero));
#endif
	bzero(&kg2->kg_startzero,
	    (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));

	mtx_init(&p2->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) RANGEOF(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_kse->ke_startcopy, &ke2->ke_startcopy,
	    (unsigned) RANGEOF(struct kse, ke_startcopy, ke_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
	    (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
#undef RANGEOF

	/*
	 * XXXKSE Theoretically only the running thread would get copied;
	 * others in the kernel would be 'aborted' in the child
	 * (i.e. return E*something*).
	 * On SMP we would have to stop them running on
	 * other CPUs!  (Set a flag in the proc that stops
	 * all returns to userland until completed.)
	 * This is wrong but ok for 1:1.
	 */
	proc_linkup(p2, kg2, ke2, td2);

	/* Set up the thread as an active thread (as if runnable). */
	TAILQ_REMOVE(&kg2->kg_iq, ke2, ke_kgrlist);
	kg2->kg_idle_kses--;
	ke2->ke_state = KES_THREAD;
	ke2->ke_thread = td2;
	td2->td_kse = ke2;
	td2->td_flags &= ~TDF_UNBOUND;	/* For the rest of this syscall. */

	/* Note: XXXKSE no pcb or u-area yet. */

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_forkproc.
	 */
	p2->p_flag = 0;
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	if (p1->p_sflag & PS_PROFIL)
		startprofclock(p2);
	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);	/* XXXKSE */

	/*
	 * Set up linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		/*
		 * XXX: This assumes a leader is a parent or grandparent of
		 * all processes in a task.
		 */
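		/*
		 * p_peers is a singly-linked list headed at the leader;
		 * new peers are pushed onto the front.  E.g. if a leader
		 * L rforks p2 and then p3 with RFTHREAD, the chain is
		 * L->p_peers -> p3 -> p2 -> NULL.
		 */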
		if (p1->p_leader != p1)
			PROC_LOCK(p1->p_leader);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		if (p1->p_leader != p1)
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	pargs_hold(p2->p_args);

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_uarea->u_sigacts) {
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			newsigacts = NULL;
			*p2->p_sigacts = p1->p_uarea->u_sigacts;
		}
	} else {
		p2->p_procsig = newprocsig;
		newprocsig = NULL;
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_forkproc() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);
	p2->p_fd = fd;
	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  PS_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&td2->td_contested);	/* XXXKSE only 1 thread? */

	callout_init(&p2->p_itcallout, 0);
	callout_init(&td2->td_slpcallout, 1);	/* XXXKSE */

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracep == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
	mtx_unlock(&ktrace_mtx);
#endif

	/*
	 * Set priority of child to be that of parent.
	 * XXXKSE hey! copying the estcpu seems dodgy.. should split it..
	 */
	mtx_lock_spin(&sched_lock);
	p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu;
	mtx_unlock_spin(&sched_lock);

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
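	/*
	 * Example: a caller spawning a helper it never intends to
	 * wait() for can pass RFNOWAIT; init (pid 1) then reaps the
	 * child when it exits.
	 */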
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	PROC_UNLOCK(p2);
	sx_xunlock(&proctree_lock);

	/*
	 * XXXKSE: In KSE, there would be a race here if one thread was
	 * dying due to a signal (or calling exit1() for that matter) while
	 * another thread was calling fork1().  Not sure how KSE wants to work
	 * around that.  The problem is that up until the point above, if p1
	 * gets killed, it won't find p2 in its list in order for it to be
	 * reparented.  Alternatively, we could add a new p_flag that gets set
	 * before we reparent all the children that we check above and just
	 * use init as our parent if that flag is set.  (Either that
	 * or abort the fork if the flag is set since our parent died trying
	 * to fork us (which is evil)).
	 */

	KASSERT(newprocsig == NULL, ("unused newprocsig"));
	if (newsigacts != NULL)
		FREE(newsigacts, M_SUBPROC);
	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (i.e. directly into user mode).
	 */
	vm_forkproc(td, p2, td2, flags);

	if (flags == (RFFDG | RFPROC)) {
		cnt.v_forks++;
		cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		cnt.v_vforks++;
		cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		cnt.v_kthreads++;
		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		cnt.v_rforks++;
		cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up; now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	sx_slock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
	sx_sunlock(&fork_list_lock);

	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;
	if ((flags & RFSTOPPED) == 0) {
		mtx_lock_spin(&sched_lock);
		p2->p_state = PRS_NORMAL;
		setrunqueue(td2);
		mtx_unlock_spin(&sched_lock);
	}

	/*
	 * Now can be swapped.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
	PROC_UNLOCK(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
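/*
 * Illustrative use from a (hypothetical) loadable module -- the hook
 * runs with both processes set up, just before the child is made
 * runnable:
 *
 *	static void
 *	example_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		printf("pid %d forked child pid %d\n",
 *		    p1->p_pid, p2->p_pid);
 *	}
 *
 *	at_fork(example_fork_hook);	(at module load)
 *	rm_at_fork(example_fork_hook);	(at module unload)
 */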
int
at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* Let the programmer know if he's been stupid. */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	sx_xlock(&fork_list_lock);
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	sx_xunlock(&fork_list_lock);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

	sx_xlock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			sx_xunlock(&fork_list_lock);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	sx_xunlock(&fork_list_lock);
	return (0);
}

/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	p->p_state = PRS_NORMAL;
	/*
	 * Finish setting up thread glue.  We need to initialize
	 * the thread into a td_critnest=1 state.  Some platforms
	 * may have already partially or fully initialized td_critnest
	 * and/or td_md.md_savecrit (when applicable).
	 *
	 * See <arch>/<arch>/critical.c.
	 */
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_lock.mtx_recurse = 0;
	cpu_critical_fork_exit();
	CTR3(KTR_PROC, "fork_exit: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_KTHREAD) {
		PROC_UNLOCK(p);
		mtx_lock(&Giant);
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	PROC_UNLOCK(p);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame, 0);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}