/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procdesc.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_fork_func_t	dtrace_fasttrap_fork;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int");

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

/* ARGSUSED */
int
sys_fork(struct thread *td, struct fork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

/* ARGSUSED */
int
sys_pdfork(struct thread *td, struct pdfork_args *uap)
{
	struct fork_req fr;
	int error, fd, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPROCDESC;
	fr.fr_pidp = &pid;
	fr.fr_pd_fd = &fd;
	fr.fr_pd_flags = uap->flags;
	/*
	 * It is necessary to return fd by reference because 0 is a valid file
	 * descriptor number, and the child needs to be able to distinguish
	 * itself from the parent using the return value.
	 */
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
		error = copyout(&fd, uap->fdp, sizeof(fd));
	}
	return (error);
}
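/*
 * Illustrative userland use of pdfork(2) (a sketch only, not part of
 * this file).  The descriptor comes back through the pointer argument
 * because 0 is a valid fd, while the return value distinguishes parent
 * from child; run_child() and monitor() are hypothetical:
 *
 *	int fd;
 *	pid_t pid = pdfork(&fd, 0);
 *
 *	if (pid == 0)
 *		run_child();		// hypothetical child-side work
 *	else if (pid > 0)
 *		monitor(fd);		// hypothetical; fd is the child's
 *					// process descriptor
 */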
/* ARGSUSED */
int
sys_vfork(struct thread *td, struct vfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

int
sys_rfork(struct thread *td, struct rfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG_FFLAGS(uap->flags);
	bzero(&fr, sizeof(fr));
	fr.fr_flags = uap->flags;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}

int __exclusive_cache_line	nprocs = 1;	/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid == 0)
			randompid = 0;
		else if (pid == 1)
			/* generate a random PID modulus between 100 and 1123 */
			randompid = 100 + arc4random() % 1024;
		else if (pid < 0 || pid > pid_max - 100)
			/* out of range */
			randompid = pid_max - 100;
		else if (pid < 100)
			/* Make it reasonable */
			randompid = 100;
		else
			randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT | CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I",
    "Random PID modulus. Special values: 0: disable, 1: choose random value");

extern bitstr_t proc_id_pidmap;
extern bitstr_t proc_id_grpidmap;
extern bitstr_t proc_id_sessidmap;
extern bitstr_t proc_id_reapmap;

/*
 * Find an unused process ID.
 *
 * If RFHIGHPID is set (used during system boot), do not allocate
 * low-numbered pids.
 */
static int
fork_findpid(int flags)
{
	pid_t result;
	int trypid;

	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
	mtx_lock(&procid_lock);
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= pid_max) {
		trypid = trypid % pid_max;
		if (trypid < 100)
			trypid += 100;
	}

	bit_ffc_at(&proc_id_pidmap, trypid, pid_max, &result);
	if (result == -1) {
		trypid = 100;
		goto retry;
	}
	if (bit_test(&proc_id_grpidmap, result) ||
	    bit_test(&proc_id_sessidmap, result) ||
	    bit_test(&proc_id_reapmap, result)) {
		trypid = result + 1;
		goto retry;
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if ((flags & RFHIGHPID) == 0)
		lastpid = result;

	bit_set(&proc_id_pidmap, result);
	mtx_unlock(&procid_lock);

	return (result);
}
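/*
 * Illustrative numbers for fork_findpid() above (not normative): with
 * kern.randompid set to 1000 and lastpid == 500, the bitmap scan starts
 * at a pseudo-random point in [501, 1500] instead of at 501, so
 * consecutive forks do not hand out consecutive pids.  A free bit in
 * proc_id_pidmap is still rejected while the same ID is live as a
 * process group, session, or reaper subtree ID, because those
 * identifiers share the pid namespace.
 */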
286 */ 287 if ((flags & RFHIGHPID) == 0) 288 lastpid = result; 289 290 bit_set(&proc_id_pidmap, result); 291 mtx_unlock(&procid_lock); 292 293 return (result); 294 } 295 296 static int 297 fork_norfproc(struct thread *td, int flags) 298 { 299 int error; 300 struct proc *p1; 301 302 KASSERT((flags & RFPROC) == 0, 303 ("fork_norfproc called with RFPROC set")); 304 p1 = td->td_proc; 305 306 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && 307 (flags & (RFCFDG | RFFDG))) { 308 PROC_LOCK(p1); 309 if (thread_single(p1, SINGLE_BOUNDARY)) { 310 PROC_UNLOCK(p1); 311 return (ERESTART); 312 } 313 PROC_UNLOCK(p1); 314 } 315 316 error = vm_forkproc(td, NULL, NULL, NULL, flags); 317 if (error) 318 goto fail; 319 320 /* 321 * Close all file descriptors. 322 */ 323 if (flags & RFCFDG) { 324 struct filedesc *fdtmp; 325 fdtmp = fdinit(td->td_proc->p_fd, false); 326 fdescfree(td); 327 p1->p_fd = fdtmp; 328 } 329 330 /* 331 * Unshare file descriptors (from parent). 332 */ 333 if (flags & RFFDG) 334 fdunshare(td); 335 336 fail: 337 if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && 338 (flags & (RFCFDG | RFFDG))) { 339 PROC_LOCK(p1); 340 thread_single_end(p1, SINGLE_BOUNDARY); 341 PROC_UNLOCK(p1); 342 } 343 return (error); 344 } 345 346 static void 347 do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2, 348 struct vmspace *vm2, struct file *fp_procdesc) 349 { 350 struct proc *p1, *pptr; 351 int trypid; 352 struct filedesc *fd; 353 struct filedesc_to_leader *fdtol; 354 struct sigacts *newsigacts; 355 356 sx_assert(&allproc_lock, SX_XLOCKED); 357 358 p1 = td->td_proc; 359 360 trypid = fork_findpid(fr->fr_flags); 361 p2->p_state = PRS_NEW; /* protect against others */ 362 p2->p_pid = trypid; 363 AUDIT_ARG_PID(p2->p_pid); 364 LIST_INSERT_HEAD(&allproc, p2, p_list); 365 allproc_gen++; 366 sx_xlock(PIDHASHLOCK(p2->p_pid)); 367 LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); 368 sx_xunlock(PIDHASHLOCK(p2->p_pid)); 369 PROC_LOCK(p2); 370 PROC_LOCK(p1); 371 372 sx_xunlock(&allproc_lock); 373 374 bcopy(&p1->p_startcopy, &p2->p_startcopy, 375 __rangeof(struct proc, p_startcopy, p_endcopy)); 376 pargs_hold(p2->p_args); 377 378 PROC_UNLOCK(p1); 379 380 bzero(&p2->p_startzero, 381 __rangeof(struct proc, p_startzero, p_endzero)); 382 383 /* Tell the prison that we exist. */ 384 prison_proc_hold(p2->p_ucred->cr_prison); 385 386 PROC_UNLOCK(p2); 387 388 tidhash_add(td2); 389 390 /* 391 * Malloc things while we don't hold any locks. 392 */ 393 if (fr->fr_flags & RFSIGSHARE) 394 newsigacts = NULL; 395 else 396 newsigacts = sigacts_alloc(); 397 398 /* 399 * Copy filedesc. 400 */ 401 if (fr->fr_flags & RFCFDG) { 402 fd = fdinit(p1->p_fd, false); 403 fdtol = NULL; 404 } else if (fr->fr_flags & RFFDG) { 405 fd = fdcopy(p1->p_fd); 406 fdtol = NULL; 407 } else { 408 fd = fdshare(p1->p_fd); 409 if (p1->p_fdtol == NULL) 410 p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL, 411 p1->p_leader); 412 if ((fr->fr_flags & RFTHREAD) != 0) { 413 /* 414 * Shared file descriptor table, and shared 415 * process leaders. 416 */ 417 fdtol = p1->p_fdtol; 418 FILEDESC_XLOCK(p1->p_fd); 419 fdtol->fdl_refcount++; 420 FILEDESC_XUNLOCK(p1->p_fd); 421 } else { 422 /* 423 * Shared file descriptor table, and different 424 * process leaders. 425 */ 426 fdtol = filedesc_to_leader_alloc(p1->p_fdtol, 427 p1->p_fd, p2); 428 } 429 } 430 /* 431 * Make a proc table entry for the new process. 
432 * Start by zeroing the section of proc that is zero-initialized, 433 * then copy the section that is copied directly from the parent. 434 */ 435 436 PROC_LOCK(p2); 437 PROC_LOCK(p1); 438 439 bzero(&td2->td_startzero, 440 __rangeof(struct thread, td_startzero, td_endzero)); 441 442 bcopy(&td->td_startcopy, &td2->td_startcopy, 443 __rangeof(struct thread, td_startcopy, td_endcopy)); 444 445 bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name)); 446 td2->td_sigstk = td->td_sigstk; 447 td2->td_flags = TDF_INMEM; 448 td2->td_lend_user_pri = PRI_MAX; 449 450 #ifdef VIMAGE 451 td2->td_vnet = NULL; 452 td2->td_vnet_lpush = NULL; 453 #endif 454 455 /* 456 * Allow the scheduler to initialize the child. 457 */ 458 thread_lock(td); 459 sched_fork(td, td2); 460 thread_unlock(td); 461 462 /* 463 * Duplicate sub-structures as needed. 464 * Increase reference counts on shared objects. 465 */ 466 p2->p_flag = P_INMEM; 467 p2->p_flag2 = p1->p_flag2 & (P2_ASLR_DISABLE | P2_ASLR_ENABLE | 468 P2_ASLR_IGNSTART | P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP); 469 p2->p_swtick = ticks; 470 if (p1->p_flag & P_PROFIL) 471 startprofclock(p2); 472 473 if (fr->fr_flags & RFSIGSHARE) { 474 p2->p_sigacts = sigacts_hold(p1->p_sigacts); 475 } else { 476 sigacts_copy(newsigacts, p1->p_sigacts); 477 p2->p_sigacts = newsigacts; 478 } 479 480 if (fr->fr_flags & RFTSIGZMB) 481 p2->p_sigparent = RFTSIGNUM(fr->fr_flags); 482 else if (fr->fr_flags & RFLINUXTHPN) 483 p2->p_sigparent = SIGUSR1; 484 else 485 p2->p_sigparent = SIGCHLD; 486 487 p2->p_textvp = p1->p_textvp; 488 p2->p_fd = fd; 489 p2->p_fdtol = fdtol; 490 491 if (p1->p_flag2 & P2_INHERIT_PROTECTED) { 492 p2->p_flag |= P_PROTECTED; 493 p2->p_flag2 |= P2_INHERIT_PROTECTED; 494 } 495 496 /* 497 * p_limit is copy-on-write. Bump its refcount. 498 */ 499 lim_fork(p1, p2); 500 501 thread_cow_get_proc(td2, p2); 502 503 pstats_fork(p1->p_stats, p2->p_stats); 504 505 PROC_UNLOCK(p1); 506 PROC_UNLOCK(p2); 507 508 /* Bump references to the text vnode (for procfs). */ 509 if (p2->p_textvp) 510 vrefact(p2->p_textvp); 511 512 /* 513 * Set up linkage for kernel based threading. 514 */ 515 if ((fr->fr_flags & RFTHREAD) != 0) { 516 mtx_lock(&ppeers_lock); 517 p2->p_peers = p1->p_peers; 518 p1->p_peers = p2; 519 p2->p_leader = p1->p_leader; 520 mtx_unlock(&ppeers_lock); 521 PROC_LOCK(p1->p_leader); 522 if ((p1->p_leader->p_flag & P_WEXIT) != 0) { 523 PROC_UNLOCK(p1->p_leader); 524 /* 525 * The task leader is exiting, so process p1 is 526 * going to be killed shortly. Since p1 obviously 527 * isn't dead yet, we know that the leader is either 528 * sending SIGKILL's to all the processes in this 529 * task or is sleeping waiting for all the peers to 530 * exit. We let p1 complete the fork, but we need 531 * to go ahead and kill the new process p2 since 532 * the task leader may not get a chance to send 533 * SIGKILL to it. We leave it on the list so that 534 * the task leader will wait for this new process 535 * to commit suicide. 536 */ 537 PROC_LOCK(p2); 538 kern_psignal(p2, SIGKILL); 539 PROC_UNLOCK(p2); 540 } else 541 PROC_UNLOCK(p1->p_leader); 542 } else { 543 p2->p_peers = NULL; 544 p2->p_leader = p2; 545 } 546 547 sx_xlock(&proctree_lock); 548 PGRP_LOCK(p1->p_pgrp); 549 PROC_LOCK(p2); 550 PROC_LOCK(p1); 551 552 /* 553 * Preserve some more flags in subprocess. P_PROFIL has already 554 * been preserved. 
555 */ 556 p2->p_flag |= p1->p_flag & P_SUGID; 557 td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING; 558 SESS_LOCK(p1->p_session); 559 if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) 560 p2->p_flag |= P_CONTROLT; 561 SESS_UNLOCK(p1->p_session); 562 if (fr->fr_flags & RFPPWAIT) 563 p2->p_flag |= P_PPWAIT; 564 565 p2->p_pgrp = p1->p_pgrp; 566 LIST_INSERT_AFTER(p1, p2, p_pglist); 567 PGRP_UNLOCK(p1->p_pgrp); 568 LIST_INIT(&p2->p_children); 569 LIST_INIT(&p2->p_orphans); 570 571 callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0); 572 573 /* 574 * If PF_FORK is set, the child process inherits the 575 * procfs ioctl flags from its parent. 576 */ 577 if (p1->p_pfsflags & PF_FORK) { 578 p2->p_stops = p1->p_stops; 579 p2->p_pfsflags = p1->p_pfsflags; 580 } 581 582 /* 583 * This begins the section where we must prevent the parent 584 * from being swapped. 585 */ 586 _PHOLD(p1); 587 PROC_UNLOCK(p1); 588 589 /* 590 * Attach the new process to its parent. 591 * 592 * If RFNOWAIT is set, the newly created process becomes a child 593 * of init. This effectively disassociates the child from the 594 * parent. 595 */ 596 if ((fr->fr_flags & RFNOWAIT) != 0) { 597 pptr = p1->p_reaper; 598 p2->p_reaper = pptr; 599 } else { 600 p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ? 601 p1 : p1->p_reaper; 602 pptr = p1; 603 } 604 p2->p_pptr = pptr; 605 p2->p_oppid = pptr->p_pid; 606 LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling); 607 LIST_INIT(&p2->p_reaplist); 608 LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling); 609 if (p2->p_reaper == p1 && p1 != initproc) { 610 p2->p_reapsubtree = p2->p_pid; 611 proc_id_set_cond(PROC_ID_REAP, p2->p_pid); 612 } 613 sx_xunlock(&proctree_lock); 614 615 /* Inform accounting that we have forked. */ 616 p2->p_acflag = AFORK; 617 PROC_UNLOCK(p2); 618 619 #ifdef KTRACE 620 ktrprocfork(p1, p2); 621 #endif 622 623 /* 624 * Finish creating the child process. It will return via a different 625 * execution path later. (ie: directly into user mode) 626 */ 627 vm_forkproc(td, p2, td2, vm2, fr->fr_flags); 628 629 if (fr->fr_flags == (RFFDG | RFPROC)) { 630 VM_CNT_INC(v_forks); 631 VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize + 632 p2->p_vmspace->vm_ssize); 633 } else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { 634 VM_CNT_INC(v_vforks); 635 VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize + 636 p2->p_vmspace->vm_ssize); 637 } else if (p1 == &proc0) { 638 VM_CNT_INC(v_kthreads); 639 VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize + 640 p2->p_vmspace->vm_ssize); 641 } else { 642 VM_CNT_INC(v_rforks); 643 VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize + 644 p2->p_vmspace->vm_ssize); 645 } 646 647 /* 648 * Associate the process descriptor with the process before anything 649 * can happen that might cause that process to need the descriptor. 650 * However, don't do this until after fork(2) can no longer fail. 651 */ 652 if (fr->fr_flags & RFPROCDESC) 653 procdesc_new(p2, fr->fr_pd_flags); 654 655 /* 656 * Both processes are set up, now check if any loadable modules want 657 * to adjust anything. 658 */ 659 EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags); 660 661 /* 662 * Set the child start time and mark the process as being complete. 
663 */ 664 PROC_LOCK(p2); 665 PROC_LOCK(p1); 666 microuptime(&p2->p_stats->p_start); 667 PROC_SLOCK(p2); 668 p2->p_state = PRS_NORMAL; 669 PROC_SUNLOCK(p2); 670 671 #ifdef KDTRACE_HOOKS 672 /* 673 * Tell the DTrace fasttrap provider about the new process so that any 674 * tracepoints inherited from the parent can be removed. We have to do 675 * this only after p_state is PRS_NORMAL since the fasttrap module will 676 * use pfind() later on. 677 */ 678 if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork) 679 dtrace_fasttrap_fork(p1, p2); 680 #endif 681 if (fr->fr_flags & RFPPWAIT) { 682 td->td_pflags |= TDP_RFPPWAIT; 683 td->td_rfppwait_p = p2; 684 td->td_dbgflags |= TDB_VFORK; 685 } 686 PROC_UNLOCK(p2); 687 688 /* 689 * Tell any interested parties about the new process. 690 */ 691 knote_fork(p1->p_klist, p2->p_pid); 692 693 /* 694 * Now can be swapped. 695 */ 696 _PRELE(p1); 697 PROC_UNLOCK(p1); 698 SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags); 699 700 if (fr->fr_flags & RFPROCDESC) { 701 procdesc_finit(p2->p_procdesc, fp_procdesc); 702 fdrop(fp_procdesc, td); 703 } 704 705 /* 706 * Speculative check for PTRACE_FORK. PTRACE_FORK is not 707 * synced with forks in progress so it is OK if we miss it 708 * if being set atm. 709 */ 710 if ((p1->p_ptevents & PTRACE_FORK) != 0) { 711 sx_xlock(&proctree_lock); 712 PROC_LOCK(p2); 713 714 /* 715 * p1->p_ptevents & p1->p_pptr are protected by both 716 * process and proctree locks for modifications, 717 * so owning proctree_lock allows the race-free read. 718 */ 719 if ((p1->p_ptevents & PTRACE_FORK) != 0) { 720 /* 721 * Arrange for debugger to receive the fork event. 722 * 723 * We can report PL_FLAG_FORKED regardless of 724 * P_FOLLOWFORK settings, but it does not make a sense 725 * for runaway child. 726 */ 727 td->td_dbgflags |= TDB_FORK; 728 td->td_dbg_forked = p2->p_pid; 729 td2->td_dbgflags |= TDB_STOPATFORK; 730 proc_set_traced(p2, true); 731 CTR2(KTR_PTRACE, 732 "do_fork: attaching to new child pid %d: oppid %d", 733 p2->p_pid, p2->p_oppid); 734 proc_reparent(p2, p1->p_pptr, false); 735 } 736 PROC_UNLOCK(p2); 737 sx_xunlock(&proctree_lock); 738 } 739 740 racct_proc_fork_done(p2); 741 742 if ((fr->fr_flags & RFSTOPPED) == 0) { 743 if (fr->fr_pidp != NULL) 744 *fr->fr_pidp = p2->p_pid; 745 /* 746 * If RFSTOPPED not requested, make child runnable and 747 * add to run queue. 748 */ 749 thread_lock(td2); 750 TD_SET_CAN_RUN(td2); 751 sched_add(td2, SRQ_BORING); 752 thread_unlock(td2); 753 } else { 754 *fr->fr_procp = p2; 755 } 756 } 757 758 void 759 fork_rfppwait(struct thread *td) 760 { 761 struct proc *p, *p2; 762 763 MPASS(td->td_pflags & TDP_RFPPWAIT); 764 765 p = td->td_proc; 766 /* 767 * Preserve synchronization semantics of vfork. If 768 * waiting for child to exec or exit, fork set 769 * P_PPWAIT on child, and there we sleep on our proc 770 * (in case of exit). 771 * 772 * Do it after the ptracestop() above is finished, to 773 * not block our debugger until child execs or exits 774 * to finish vfork wait. 
775 */ 776 td->td_pflags &= ~TDP_RFPPWAIT; 777 p2 = td->td_rfppwait_p; 778 again: 779 PROC_LOCK(p2); 780 while (p2->p_flag & P_PPWAIT) { 781 PROC_LOCK(p); 782 if (thread_suspend_check_needed()) { 783 PROC_UNLOCK(p2); 784 thread_suspend_check(0); 785 PROC_UNLOCK(p); 786 goto again; 787 } else { 788 PROC_UNLOCK(p); 789 } 790 cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz); 791 } 792 PROC_UNLOCK(p2); 793 794 if (td->td_dbgflags & TDB_VFORK) { 795 PROC_LOCK(p); 796 if (p->p_ptevents & PTRACE_VFORK) 797 ptracestop(td, SIGTRAP, NULL); 798 td->td_dbgflags &= ~TDB_VFORK; 799 PROC_UNLOCK(p); 800 } 801 } 802 803 int 804 fork1(struct thread *td, struct fork_req *fr) 805 { 806 struct proc *p1, *newproc; 807 struct thread *td2; 808 struct vmspace *vm2; 809 struct file *fp_procdesc; 810 vm_ooffset_t mem_charged; 811 int error, nprocs_new, ok; 812 static int curfail; 813 static struct timeval lastfail; 814 int flags, pages; 815 816 flags = fr->fr_flags; 817 pages = fr->fr_pages; 818 819 if ((flags & RFSTOPPED) != 0) 820 MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL); 821 else 822 MPASS(fr->fr_procp == NULL); 823 824 /* Check for the undefined or unimplemented flags. */ 825 if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0) 826 return (EINVAL); 827 828 /* Signal value requires RFTSIGZMB. */ 829 if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0) 830 return (EINVAL); 831 832 /* Can't copy and clear. */ 833 if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG)) 834 return (EINVAL); 835 836 /* Check the validity of the signal number. */ 837 if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG) 838 return (EINVAL); 839 840 if ((flags & RFPROCDESC) != 0) { 841 /* Can't not create a process yet get a process descriptor. */ 842 if ((flags & RFPROC) == 0) 843 return (EINVAL); 844 845 /* Must provide a place to put a procdesc if creating one. */ 846 if (fr->fr_pd_fd == NULL) 847 return (EINVAL); 848 849 /* Check if we are using supported flags. */ 850 if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0) 851 return (EINVAL); 852 } 853 854 p1 = td->td_proc; 855 856 /* 857 * Here we don't create a new process, but we divorce 858 * certain parts of a process from itself. 859 */ 860 if ((flags & RFPROC) == 0) { 861 if (fr->fr_procp != NULL) 862 *fr->fr_procp = NULL; 863 else if (fr->fr_pidp != NULL) 864 *fr->fr_pidp = 0; 865 return (fork_norfproc(td, flags)); 866 } 867 868 fp_procdesc = NULL; 869 newproc = NULL; 870 vm2 = NULL; 871 872 /* 873 * Increment the nprocs resource before allocations occur. 874 * Although process entries are dynamically created, we still 875 * keep a global limit on the maximum number we will 876 * create. There are hard-limits as to the number of processes 877 * that can run, established by the KVA and memory usage for 878 * the process data. 879 * 880 * Don't allow a nonprivileged user to use the last ten 881 * processes; don't let root exceed the limit. 
882 */ 883 nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1; 884 if (nprocs_new >= maxproc - 10) { 885 if (priv_check_cred(td->td_ucred, PRIV_MAXPROC) != 0 || 886 nprocs_new >= maxproc) { 887 error = EAGAIN; 888 sx_xlock(&allproc_lock); 889 if (ppsratecheck(&lastfail, &curfail, 1)) { 890 printf("maxproc limit exceeded by uid %u " 891 "(pid %d); see tuning(7) and " 892 "login.conf(5)\n", 893 td->td_ucred->cr_ruid, p1->p_pid); 894 } 895 sx_xunlock(&allproc_lock); 896 goto fail2; 897 } 898 } 899 900 /* 901 * If required, create a process descriptor in the parent first; we 902 * will abandon it if something goes wrong. We don't finit() until 903 * later. 904 */ 905 if (flags & RFPROCDESC) { 906 error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd, 907 fr->fr_pd_flags, fr->fr_pd_fcaps); 908 if (error != 0) 909 goto fail2; 910 } 911 912 mem_charged = 0; 913 if (pages == 0) 914 pages = kstack_pages; 915 /* Allocate new proc. */ 916 newproc = uma_zalloc(proc_zone, M_WAITOK); 917 td2 = FIRST_THREAD_IN_PROC(newproc); 918 if (td2 == NULL) { 919 td2 = thread_alloc(pages); 920 if (td2 == NULL) { 921 error = ENOMEM; 922 goto fail2; 923 } 924 proc_linkup(newproc, td2); 925 } else { 926 if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) { 927 if (td2->td_kstack != 0) 928 vm_thread_dispose(td2); 929 if (!thread_alloc_stack(td2, pages)) { 930 error = ENOMEM; 931 goto fail2; 932 } 933 } 934 } 935 936 if ((flags & RFMEM) == 0) { 937 vm2 = vmspace_fork(p1->p_vmspace, &mem_charged); 938 if (vm2 == NULL) { 939 error = ENOMEM; 940 goto fail2; 941 } 942 if (!swap_reserve(mem_charged)) { 943 /* 944 * The swap reservation failed. The accounting 945 * from the entries of the copied vm2 will be 946 * subtracted in vmspace_free(), so force the 947 * reservation there. 948 */ 949 swap_reserve_force(mem_charged); 950 error = ENOMEM; 951 goto fail2; 952 } 953 } else 954 vm2 = NULL; 955 956 /* 957 * XXX: This is ugly; when we copy resource usage, we need to bump 958 * per-cred resource counters. 959 */ 960 proc_set_cred_init(newproc, crhold(td->td_ucred)); 961 962 /* 963 * Initialize resource accounting for the child process. 964 */ 965 error = racct_proc_fork(p1, newproc); 966 if (error != 0) { 967 error = EAGAIN; 968 goto fail1; 969 } 970 971 #ifdef MAC 972 mac_proc_init(newproc); 973 #endif 974 newproc->p_klist = knlist_alloc(&newproc->p_mtx); 975 STAILQ_INIT(&newproc->p_ktr); 976 977 sx_xlock(&allproc_lock); 978 979 /* 980 * Increment the count of procs running with this uid. Don't allow 981 * a nonprivileged user to exceed their current limit. 982 * 983 * XXXRW: Can we avoid privilege here if it's not needed? 
984 */ 985 error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT); 986 if (error == 0) 987 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0); 988 else { 989 ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 990 lim_cur(td, RLIMIT_NPROC)); 991 } 992 if (ok) { 993 do_fork(td, fr, newproc, td2, vm2, fp_procdesc); 994 return (0); 995 } 996 997 error = EAGAIN; 998 sx_xunlock(&allproc_lock); 999 #ifdef MAC 1000 mac_proc_destroy(newproc); 1001 #endif 1002 racct_proc_exit(newproc); 1003 fail1: 1004 crfree(newproc->p_ucred); 1005 newproc->p_ucred = NULL; 1006 fail2: 1007 if (vm2 != NULL) 1008 vmspace_free(vm2); 1009 uma_zfree(proc_zone, newproc); 1010 if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) { 1011 fdclose(td, fp_procdesc, *fr->fr_pd_fd); 1012 fdrop(fp_procdesc, td); 1013 } 1014 atomic_add_int(&nprocs, -1); 1015 pause("fork", hz / 2); 1016 return (error); 1017 } 1018 1019 /* 1020 * Handle the return of a child process from fork1(). This function 1021 * is called from the MD fork_trampoline() entry point. 1022 */ 1023 void 1024 fork_exit(void (*callout)(void *, struct trapframe *), void *arg, 1025 struct trapframe *frame) 1026 { 1027 struct proc *p; 1028 struct thread *td; 1029 struct thread *dtd; 1030 1031 td = curthread; 1032 p = td->td_proc; 1033 KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new")); 1034 1035 CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)", 1036 td, td_get_sched(td), p->p_pid, td->td_name); 1037 1038 sched_fork_exit(td); 1039 /* 1040 * Processes normally resume in mi_switch() after being 1041 * cpu_switch()'ed to, but when children start up they arrive here 1042 * instead, so we must do much the same things as mi_switch() would. 1043 */ 1044 if ((dtd = PCPU_GET(deadthread))) { 1045 PCPU_SET(deadthread, NULL); 1046 thread_stash(dtd); 1047 } 1048 thread_unlock(td); 1049 1050 /* 1051 * cpu_fork_kthread_handler intercepts this function call to 1052 * have this call a non-return function to stay in kernel mode. 1053 * initproc has its own fork handler, but it does return. 1054 */ 1055 KASSERT(callout != NULL, ("NULL callout in fork_exit")); 1056 callout(arg, frame); 1057 1058 /* 1059 * Check if a kernel thread misbehaved and returned from its main 1060 * function. 1061 */ 1062 if (p->p_flag & P_KPROC) { 1063 printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", 1064 td->td_name, p->p_pid); 1065 kthread_exit(); 1066 } 1067 mtx_assert(&Giant, MA_NOTOWNED); 1068 1069 if (p->p_sysent->sv_schedtail != NULL) 1070 (p->p_sysent->sv_schedtail)(td); 1071 td->td_pflags &= ~TDP_FORKING; 1072 } 1073 1074 /* 1075 * Simplified back end of syscall(), used when returning from fork() 1076 * directly into user mode. This function is passed in to fork_exit() 1077 * as the first parameter and is called when returning to a new 1078 * userland process. 1079 */ 1080 void 1081 fork_return(struct thread *td, struct trapframe *frame) 1082 { 1083 struct proc *p; 1084 1085 p = td->td_proc; 1086 if (td->td_dbgflags & TDB_STOPATFORK) { 1087 PROC_LOCK(p); 1088 if ((p->p_flag & P_TRACED) != 0) { 1089 /* 1090 * Inform the debugger if one is still present. 1091 */ 1092 td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP; 1093 ptracestop(td, SIGSTOP, NULL); 1094 td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX); 1095 } else { 1096 /* 1097 * ... otherwise clear the request. 
1098 */ 1099 td->td_dbgflags &= ~TDB_STOPATFORK; 1100 } 1101 PROC_UNLOCK(p); 1102 } else if (p->p_flag & P_TRACED || td->td_dbgflags & TDB_BORN) { 1103 /* 1104 * This is the start of a new thread in a traced 1105 * process. Report a system call exit event. 1106 */ 1107 PROC_LOCK(p); 1108 td->td_dbgflags |= TDB_SCX; 1109 _STOPEVENT(p, S_SCX, td->td_sa.code); 1110 if ((p->p_ptevents & PTRACE_SCX) != 0 || 1111 (td->td_dbgflags & TDB_BORN) != 0) 1112 ptracestop(td, SIGTRAP, NULL); 1113 td->td_dbgflags &= ~(TDB_SCX | TDB_BORN); 1114 PROC_UNLOCK(p); 1115 } 1116 1117 userret(td, frame); 1118 1119 #ifdef KTRACE 1120 if (KTRPOINT(td, KTR_SYSRET)) 1121 ktrsysret(SYS_fork, 0, 0); 1122 #endif 1123 } 1124