/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

static int	fast_vfork = 1;
SYSCTL_INT(_kern, OID_AUTO, fast_vfork, CTLFLAG_RW, &fast_vfork, 0,
    "flag to indicate whether we have a fast vfork()");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

/* ARGSUSED */
int
fork(p, uap)
	struct proc *p;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(p, RFFDG | RFPROC, &p2);
	if (error == 0) {
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return error;
}

/* ARGSUSED */
int
vfork(p, uap)
	struct proc *p;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	if (error == 0) {
		p->p_retval[0] = p2->p_pid;
		p->p_retval[1] = 0;
	}
	return error;
}

int
rfork(p, uap)
	struct proc *p;
	struct rfork_args *uap;
{
	int error;
	struct proc *p2;

	/* Mask kernel-only flags out of the user flags. */
	error = fork1(p, uap->flags & ~RFKERNELONLY, &p2);
	if (error == 0) {
		p->p_retval[0] = p2 ? p2->p_pid : 0;
		p->p_retval[1] = 0;
	}
	return error;
}
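/*
 * As the three wrappers above show, fork(2) and vfork(2) are just fixed
 * flag combinations handed to the common fork1() routine.  Expressed as
 * illustrative, userland-style rfork(2) calls:
 *
 *	rfork(RFFDG | RFPROC)				== fork()
 *	rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM)	== vfork()
 *
 * RFPROC creates the new process, RFFDG copies the file descriptor table,
 * RFMEM shares the address space, and RFPPWAIT makes the parent sleep
 * until the child execs or exits.
 */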
int	nprocs = 1;		/* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
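/*
 * Illustrative usage (from userland, not part of this file): PID
 * randomization is off by default (randompid == 0) and can be enabled
 * with, e.g.:
 *
 *	sysctl kern.randompid=100
 *
 * after which fork1() below offsets each candidate pid by
 * arc4random() % 100 before scanning for a free one.
 */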
int
fork1(p1, flags, procp)
	struct proc *p1;			/* parent proc */
	int flags;
	struct proc **procp;			/* child proc */
{
	struct proc *p2, *pptr;
	uid_t uid;
	struct proc *newproc;
	int trypid;
	int ok;
	static int pidchecked = 0;
	struct forklist *ep;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
		tablefull("proc");
		return (EAGAIN);
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_cred->p_uidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
		return (EAGAIN);
	}

	/* Allocate new proc. */
	newproc = zalloc(proc_zone);

	/*
	 * Setup linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = 0;
		newproc->p_leader = newproc;
	}

	newproc->p_vmspace = NULL;

	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from nextpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	ALLPROC_LOCK(AP_EXCLUSIVE);
	trypid = nextpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != 0; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == trypid ||
			    p2->p_pgrp->pg_id == trypid ||
			    p2->p_session->s_sid == trypid) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > trypid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > trypid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}

	/*
	 * RFHIGHPID does not mess with the nextpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		nextpid = trypid;

	p2 = newproc;
	p2->p_stat = SIDL;			/* protect against others */
	p2->p_pid = trypid;
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	ALLPROC_LOCK(AP_RELEASE);

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
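	/*
	 * The p_startzero/p_endzero and p_startcopy/p_endcopy symbols used
	 * above are markers in struct proc (see <sys/proc.h>) delimiting
	 * contiguous ranges of members, so whole groups of fields can be
	 * zeroed or inherited with a single bzero()/bcopy().  A minimal
	 * sketch of the idiom, using a hypothetical structure rather than
	 * the real struct proc layout:
	 *
	 *	struct example {
	 *		int	e_pid;		(set explicitly elsewhere)
	 *	#define	e_startzero	e_flags
	 *		int	e_flags;	(zeroed as a block at fork)
	 *		int	e_count;
	 *	#define	e_endzero	e_nice
	 *	#define	e_startcopy	e_nice
	 *		int	e_nice;		(copied from parent as a block)
	 *		int	e_prio;
	 *	};
	 *
	 *	bzero(&ex->e_startzero,
	 *	    (caddr_t)&ex->e_endzero - (caddr_t)&ex->e_startzero);
	 */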
	mtx_init(&p2->p_mtx, "process lock", MTX_DEF);
	p2->p_aioinfo = NULL;

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = P_INMEM;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
	    M_SUBPROC, M_WAITOK);
	bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
	p2->p_cred->p_refcnt = 1;
	crhold(p1->p_ucred);
	uihold(p1->p_cred->p_uidinfo);

	if (p2->p_prison) {
		p2->p_prison->pr_ref++;
		p2->p_flag |= P_JAILED;
	}

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
			struct sigacts *newsigacts;
			int s;

			/* Create the shared sigacts structure. */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			s = splhigh();
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			*p2->p_sigacts = p1->p_addr->u_sigacts;
			splx(s);
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_fork() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* Bump references to the text vnode (for procfs). */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & RFCFDG)
		p2->p_fd = fdinit(p1);
	else if (flags & RFFDG)
		p2->p_fd = fdcopy(p1);
	else
		p2->p_fd = fdshare(p1);

	/*
	 * If p_limit is still copy-on-write, bump refcnt;
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	PROCTREE_LOCK(PT_EXCLUSIVE);
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	PROCTREE_LOCK(PT_RELEASE);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_heldmtx);
	LIST_INIT(&p2->p_contested);

	callout_init(&p2->p_itcallout, 0);
	callout_init(&p2->p_slpcallout, 1);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * Set priority of child to be that of parent.
	 */
	p2->p_estcpu = p1->p_estcpu;

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	PHOLD(p1);

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later (ie: directly into user mode).
	 */
	vm_fork(p1, p2, flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
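	/*
	 * Note: if the caller passed RFSTOPPED, the child is left in SIDL
	 * and is not placed on the run queue below; such a caller is then
	 * expected to make the child runnable itself (presumably via
	 * setrunqueue()) once any remaining setup is complete, as is
	 * useful for kernel-thread creation.
	 */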
	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;
	if ((flags & RFSTOPPED) == 0) {
		splhigh();
		mtx_enter(&sched_lock, MTX_SPIN);
		p2->p_stat = SRUN;
		setrunqueue(p2);
		mtx_exit(&sched_lock, MTX_SPIN);
		spl0();
	}

	/*
	 * Now can be swapped.
	 */
	PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */

int
at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */

int
rm_at_fork(function)
	forklist_fn function;
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}
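/*
 * Illustrative use of the callout list (hypothetical module code, not
 * part of this file), assuming forklist_fn matches the three-argument
 * call made from fork1() above:
 *
 *	static void
 *	example_fork_hook(p1, p2, flags)
 *		struct proc *p1, *p2;
 *		int flags;
 *	{
 *		... inspect or adjust the new child p2 here ...
 *	}
 *
 *	error = at_fork(example_fork_hook);	(returns 0 or ENOMEM)
 *	...
 *	(void) rm_at_fork(example_fork_hook);	(returns 1 if removed)
 */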