/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

static int fast_vfork = 1;
SYSCTL_INT(_kern, OID_AUTO, fast_vfork, CTLFLAG_RW, &fast_vfork, 0,
    "flag to indicate whether we have a fast vfork()");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
        forklist_fn function;
        TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
        int     dummy;
};
#endif

/* ARGSUSED */
int
fork(p, uap)
        struct proc *p;
        struct fork_args *uap;
{
        int error;
        struct proc *p2;

        error = fork1(p, RFFDG | RFPROC, &p2);
        if (error == 0) {
                p->p_retval[0] = p2->p_pid;
                p->p_retval[1] = 0;
        }
        return (error);
}

/* ARGSUSED */
int
vfork(p, uap)
        struct proc *p;
        struct vfork_args *uap;
{
        int error;
        struct proc *p2;

        error = fork1(p, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
        if (error == 0) {
                p->p_retval[0] = p2->p_pid;
                p->p_retval[1] = 0;
        }
        return (error);
}

int
rfork(p, uap)
        struct proc *p;
        struct rfork_args *uap;
{
        int error;
        struct proc *p2;

        /* Mask kernel-only flags out of the user-supplied flags. */
        error = fork1(p, uap->flags & ~RFKERNELONLY, &p2);
        if (error == 0) {
                p->p_retval[0] = p2 ? p2->p_pid : 0;
                p->p_retval[1] = 0;
        }
        return (error);
}

int     nprocs = 1;             /* process 0 */
static int nextpid = 0;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity-check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus
 * that is too big causes a LOT more process table scans and slows down
 * fork processing as the pidchecked caching is defeated.
 */
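/*
 * Worked example (illustrative annotation, not from the original source):
 * with the modulus set via "sysctl kern.randompid=100", the allocator
 * below effectively computes
 *
 *	trypid = nextpid + 1 + arc4random() % 100;
 *
 * so successive PIDs land somewhere in a 100-wide window above nextpid
 * rather than being strictly sequential.
 */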
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error || !req->newptr)
                return (error);
        if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                pid = PID_MAX - 100;
        else if (pid < 2)                       /* NOP */
                pid = 0;
        else if (pid < 100)                     /* Make it reasonable */
                pid = 100;
        randompid = pid;
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

int
fork1(p1, flags, procp)
        struct proc *p1;                        /* parent proc */
        int flags;
        struct proc **procp;                    /* child proc */
{
        struct proc *p2, *pptr;
        uid_t uid;
        struct proc *newproc;
        int trypid;
        int ok;
        static int pidchecked = 0;
        struct forklist *ep;

        /* Can't copy and clear. */
        if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
                return (EINVAL);

        /*
         * Here we don't create a new process, but we divorce
         * certain parts of a process from itself.
         */
        if ((flags & RFPROC) == 0) {

                vm_fork(p1, 0, flags);

                /*
                 * Close all file descriptors.
                 */
                if (flags & RFCFDG) {
                        struct filedesc *fdtmp;
                        fdtmp = fdinit(p1);
                        fdfree(p1);
                        p1->p_fd = fdtmp;
                }

                /*
                 * Unshare file descriptors (from parent).
                 */
                if (flags & RFFDG) {
                        if (p1->p_fd->fd_refcnt > 1) {
                                struct filedesc *newfd;
                                newfd = fdcopy(p1);
                                fdfree(p1);
                                p1->p_fd = newfd;
                        }
                }
                *procp = NULL;
                return (0);
        }

        /*
         * Although process entries are dynamically created, we still keep
         * a global limit on the maximum number we will create.  Don't allow
         * a nonprivileged user to use the last process; don't let root
         * exceed the limit.  The variable nprocs is the current number of
         * processes, maxproc is the limit.
         */
        uid = p1->p_cred->p_ruid;
        if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
                tablefull("proc");
                return (EAGAIN);
        }
        /*
         * Increment the nprocs resource before blocking can occur.  There
         * are hard limits on the number of processes that can run.
         */
        nprocs++;

        /*
         * Increment the count of procs running with this uid.  Don't allow
         * a nonprivileged user to exceed their current limit.
         */
        ok = chgproccnt(p1->p_cred->p_uidinfo, 1,
            (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
        if (!ok) {
                /*
                 * Back out the process count.
                 */
                nprocs--;
                return (EAGAIN);
        }

        /* Allocate new proc. */
        newproc = zalloc(proc_zone);

        /*
         * Set up linkage for kernel-based threading.
         */
        if ((flags & RFTHREAD) != 0) {
                newproc->p_peers = p1->p_peers;
                p1->p_peers = newproc;
                newproc->p_leader = p1->p_leader;
        } else {
                newproc->p_peers = NULL;
                newproc->p_leader = newproc;
        }

        newproc->p_wakeup = 0;

        newproc->p_vmspace = NULL;

        /*
         * Find an unused process ID.  We remember a range of unused IDs
         * ready to use (from nextpid+1 through pidchecked-1).
         *
         * If RFHIGHPID is set (used during system boot), do not allocate
         * low-numbered pids.
         */
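        /*
         * Illustrative example of the pidchecked cache (annotation, not
         * from the original comments): if nextpid is 500 and the last
         * scan found 517 to be the smallest in-use pid, process group id,
         * or session id above it, then pidchecked is 517 and pids
         * 501..516 can be handed out on later forks without rescanning
         * the process lists.
         */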
        lockmgr(&allproc_lock, LK_EXCLUSIVE, NULL, CURPROC);
        trypid = nextpid + 1;
        if (flags & RFHIGHPID) {
                if (trypid < 10) {
                        trypid = 10;
                }
        } else {
                if (randompid)
                        trypid += arc4random() % randompid;
        }
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (trypid >= PID_MAX) {
                trypid = trypid % PID_MAX;
                if (trypid < 100)
                        trypid += 100;
                pidchecked = 0;
        }
        if (trypid >= pidchecked) {
                int doingzomb = 0;

                pidchecked = PID_MAX;
                /*
                 * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
                 * than trypid, so we can avoid checking for a while.
                 */
                p2 = LIST_FIRST(&allproc);
again:
                for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
                        while (p2->p_pid == trypid ||
                            p2->p_pgrp->pg_id == trypid ||
                            p2->p_session->s_sid == trypid) {
                                trypid++;
                                if (trypid >= pidchecked)
                                        goto retry;
                        }
                        if (p2->p_pid > trypid && pidchecked > p2->p_pid)
                                pidchecked = p2->p_pid;
                        if (p2->p_pgrp->pg_id > trypid &&
                            pidchecked > p2->p_pgrp->pg_id)
                                pidchecked = p2->p_pgrp->pg_id;
                        if (p2->p_session->s_sid > trypid &&
                            pidchecked > p2->p_session->s_sid)
                                pidchecked = p2->p_session->s_sid;
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p2 = LIST_FIRST(&zombproc);
                        goto again;
                }
        }

        /*
         * RFHIGHPID does not mess with the nextpid counter during boot.
         */
        if (flags & RFHIGHPID)
                pidchecked = 0;
        else
                nextpid = trypid;

        p2 = newproc;
        p2->p_stat = SIDL;                      /* protect against others */
        p2->p_pid = trypid;
        LIST_INSERT_HEAD(&allproc, p2, p_list);
        LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
        lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);

        /*
         * Make a proc table entry for the new process.
         * Start by zeroing the section of proc that is zero-initialized,
         * then copy the section that is copied directly from the parent.
         */
        bzero(&p2->p_startzero,
            (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
        bcopy(&p1->p_startcopy, &p2->p_startcopy,
            (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

        p2->p_aioinfo = NULL;

        /*
         * Duplicate sub-structures as needed.
         * Increase reference counts on shared objects.
         * The p_stats and p_sigacts substructs are set in vm_fork.
         */
        p2->p_flag = P_INMEM;
        if (p1->p_flag & P_PROFIL)
                startprofclock(p2);
        MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred),
            M_SUBPROC, M_WAITOK);
        bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred));
        p2->p_cred->p_refcnt = 1;
        crhold(p1->p_ucred);
        uihold(p1->p_cred->p_uidinfo);

        if (p2->p_prison) {
                p2->p_prison->pr_ref++;
                p2->p_flag |= P_JAILED;
        }

        if (p2->p_args)
                p2->p_args->ar_ref++;

        if (flags & RFSIGSHARE) {
                p2->p_procsig = p1->p_procsig;
                p2->p_procsig->ps_refcnt++;
                if (p1->p_sigacts == &p1->p_addr->u_sigacts) {
                        struct sigacts *newsigacts;
                        int s;

                        /* Create the shared sigacts structure. */
                        MALLOC(newsigacts, struct sigacts *,
                            sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
                        s = splhigh();
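                        /*
                         * Annotation (an inference, not from the original
                         * comments): splhigh() blocks interrupts here so
                         * that signal delivery cannot observe the sigacts
                         * pointers while they are being switched below.
                         */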
                        /*
                         * Set p_sigacts to the new shared structure.
                         * Note that this is updating p1->p_sigacts at the
                         * same time, since p_sigacts is just a pointer to
                         * the shared p_procsig->ps_sigacts.
                         */
                        p2->p_sigacts = newsigacts;
                        bcopy(&p1->p_addr->u_sigacts, p2->p_sigacts,
                            sizeof(*p2->p_sigacts));
                        splx(s);
                }
        } else {
                MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
                    M_SUBPROC, M_WAITOK);
                bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
                p2->p_procsig->ps_refcnt = 1;
                p2->p_sigacts = NULL;           /* finished in vm_fork() */
        }
        if (flags & RFLINUXTHPN)
                p2->p_sigparent = SIGUSR1;
        else
                p2->p_sigparent = SIGCHLD;

        /* Bump references to the text vnode (for procfs). */
        p2->p_textvp = p1->p_textvp;
        if (p2->p_textvp)
                VREF(p2->p_textvp);

        if (flags & RFCFDG)
                p2->p_fd = fdinit(p1);
        else if (flags & RFFDG)
                p2->p_fd = fdcopy(p1);
        else
                p2->p_fd = fdshare(p1);

        /*
         * If p_limit is still copy-on-write, bump refcnt,
         * otherwise get a copy that won't be modified.
         * (If PL_SHAREMOD is clear, the structure is shared
         * copy-on-write.)
         */
        if (p1->p_limit->p_lflags & PL_SHAREMOD)
                p2->p_limit = limcopy(p1->p_limit);
        else {
                p2->p_limit = p1->p_limit;
                p2->p_limit->p_refcnt++;
        }

        /*
         * Preserve some more flags in the subprocess.  P_PROFIL has already
         * been preserved.
         */
        p2->p_flag |= p1->p_flag & P_SUGID;
        if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
                p2->p_flag |= P_CONTROLT;
        if (flags & RFPPWAIT)
                p2->p_flag |= P_PPWAIT;

        LIST_INSERT_AFTER(p1, p2, p_pglist);

        /*
         * Attach the new process to its parent.
         *
         * If RFNOWAIT is set, the newly created process becomes a child
         * of init.  This effectively disassociates the child from the
         * parent.
         */
        if (flags & RFNOWAIT)
                pptr = initproc;
        else
                pptr = p1;
        p2->p_pptr = pptr;
        LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
        LIST_INIT(&p2->p_children);
        LIST_INIT(&p2->p_heldmtx);
        LIST_INIT(&p2->p_contested);

#ifdef KTRACE
        /*
         * Copy traceflag and tracefile if enabled.
         * If not inherited, these were zeroed above.
         */
        if (p1->p_traceflag & KTRFAC_INHERIT) {
                p2->p_traceflag = p1->p_traceflag;
                if ((p2->p_tracep = p1->p_tracep) != NULL)
                        VREF(p2->p_tracep);
        }
#endif

        /*
         * Set the priority of the child to be that of the parent.
         */
        p2->p_estcpu = p1->p_estcpu;

        /*
         * This begins the section where we must prevent the parent
         * from being swapped.
         */
        PHOLD(p1);

        /*
         * Finish creating the child process.  It will return via a different
         * execution path later (i.e., directly into user mode).
         */
        vm_fork(p1, p2, flags);

        /*
         * Both processes are set up; now check if any loadable modules want
         * to adjust anything.
         * What if they have an error? XXX
         */
        TAILQ_FOREACH(ep, &fork_list, next) {
                (*ep->function)(p1, p2, flags);
        }

        /*
         * If RFSTOPPED was not requested, make the child runnable and
         * add it to the run queue.
         */
        microtime(&(p2->p_stats->p_start));
        p2->p_acflag = AFORK;
        if ((flags & RFSTOPPED) == 0) {
                splhigh();
                mtx_enter(&sched_lock, MTX_SPIN);
                p2->p_stat = SRUN;
                setrunqueue(p2);
                mtx_exit(&sched_lock, MTX_SPIN);
                spl0();
        }
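        /*
         * Aside (an inference, not from the original comments): a caller
         * that passes RFSTOPPED gets the child back still in SIDL and is
         * expected to finish its own setup, then set p_stat and call
         * setrunqueue() itself once the child is ready to run.
         */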
        /*
         * Now can be swapped.
         */
        PRELE(p1);

        /*
         * Tell any interested parties about the new process.
         */
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

        /*
         * Preserve synchronization semantics of vfork.  If waiting for
         * child to exec or exit, set P_PPWAIT on child, and sleep on our
         * proc (in case of exit).
         */
        while (p2->p_flag & P_PPWAIT)
                tsleep(p1, PWAIT, "ppwait", 0);

        /*
         * Return child proc pointer to parent.
         */
        *procp = p2;
        return (0);
}

/*
 * The next two functions are general routines to handle adding and
 * deleting items from the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list;
 * first, however, make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */

int
at_fork(function)
        forklist_fn function;
{
        struct forklist *ep;

#ifdef INVARIANTS
        /* Let the programmer know if he's been stupid. */
        if (rm_at_fork(function))
                printf("WARNING: fork callout entry (%p) already present\n",
                    function);
#endif
        ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
        if (ep == NULL)
                return (ENOMEM);
        ep->function = function;
        TAILQ_INSERT_TAIL(&fork_list, ep, next);
        return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */

int
rm_at_fork(function)
        forklist_fn function;
{
        struct forklist *ep;

        TAILQ_FOREACH(ep, &fork_list, next) {
                if (ep->function == function) {
                        TAILQ_REMOVE(&fork_list, ep, next);
                        free(ep, M_ATFORK);
                        return (1);
                }
        }
        return (0);
}
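/*
 * Usage sketch (illustrative only; "my_fork_hook" is a made-up name, and
 * the callback shape is inferred from the (*ep->function)(p1, p2, flags)
 * call site in fork1() above): a kernel module can hook process creation
 * like so:
 *
 *	static void
 *	my_fork_hook(p1, p2, flags)
 *		struct proc *p1;	(the parent process)
 *		struct proc *p2;	(the newly created child)
 *		int flags;		(the RF* flags passed to fork1)
 *	{
 *		...
 *	}
 *
 *	at_fork(my_fork_hook);		(e.g., at module load)
 *	rm_at_fork(my_fork_hook);	(at module unload)
 *
 * The hook runs for every new process, after both procs are fully set up
 * but before the child is placed on the run queue.
 */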