/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/policy.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/acct.h>
#include <sys/tuneable.h>
#include <sys/class.h>
#include <sys/kmem.h>
#include <sys/session.h>
#include <sys/ucontext.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/vmsystm.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/shm_impl.h>
#include <sys/door_data.h>
#include <vm/as.h>
#include <vm/rm.h>
#include <c2/audit.h>
#include <sys/var.h>
#include <sys/schedctl.h>
#include <sys/utrap.h>
#include <sys/task.h>
#include <sys/resource.h>
#include <sys/cyclic.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/contract/process_impl.h>
#include <sys/list.h>
#include <sys/dtrace.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/sdt.h>
#include <sys/corectl.h>
#include <sys/brand.h>
#include <sys/fork.h>

static int64_t cfork(int, int, int);
static int getproc(proc_t **, int);
static void fork_fail(proc_t *);
static void forklwp_fail(proc_t *);

int fork_fail_pending;

extern struct kmem_cache *process_cache;

/*
 * forkall system call.
 */
int64_t
forkall(void)
{
	return (cfork(0, 0, 0));
}

/*
 * The parent is stopped until the child invokes relvm().
 */
int64_t
vfork(void)
{
	curthread->t_post_sys = 1;	/* so vfwait() will be called */
	return (cfork(1, 1, 0));
}

/*
 * fork system call, aka fork1.
 */
int64_t
fork1(void)
{
	return (cfork(0, 1, 0));
}

/*
 * The forkall(), vfork(), and fork1() system calls are no longer
 * invoked by libc.  They are retained only for the benefit of
 * old statically-linked applications.  They should be eliminated
 * when we no longer care about such old and broken applications.
 */
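
/*
 * For reference, the (isvfork, isfork1, flags) conventions used at
 * the cfork() call sites in this file:
 *
 *	forkall()		cfork(0, 0, 0)	duplicate all lwps
 *	vfork()			cfork(1, 1, 0)	borrow parent's address space
 *	fork1()			cfork(0, 1, 0)	duplicate only the calling lwp
 *	forkx(flags)		cfork(0, 1, flags)
 *	forkallx(flags)		cfork(0, 0, flags)
 *	vforkx(flags)		cfork(1, 1, flags)
 */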

/*
 * forksys system call - forkx, forkallx, vforkx.
 * This is the interface now invoked by libc.
 */
int64_t
forksys(int subcode, int flags)
{
	switch (subcode) {
	case 0:
		return (cfork(0, 1, flags));	/* forkx(flags) */
	case 1:
		return (cfork(0, 0, flags));	/* forkallx(flags) */
	case 2:
		curthread->t_post_sys = 1;	/* so vfwait() will be called */
		return (cfork(1, 1, flags));	/* vforkx(flags) */
	default:
		return ((int64_t)set_errno(EINVAL));
	}
}

/* ARGSUSED */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp, **orphpp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t	r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	/*
	 * Allow only these two flags.
	 */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		goto forkerr;
	}

	/*
	 * fork is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		goto forkerr;
	}

	if ((error = secpolicy_basic_fork(CRED())) != 0)
		goto forkerr;

	/*
	 * If the calling lwp is doing a fork1() then the
	 * other lwps in this process are not duplicated and
	 * don't need to be held where their kernel stacks can be
	 * cloned.  If doing forkall(), the process is held with
	 * SHOLDFORK, so that the lwps are at a point where their
	 * stacks can be copied, which is on entry or exit from
	 * the kernel.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
		aston(curthread);
		error = EINTR;
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully constructed
	 * before creating the child process structure.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * If this is vfork(), cancel any suspend request we might
	 * have gotten from some other thread via lwp_suspend().
	 * Otherwise we could end up with a deadlock on return
	 * from the vfork() in both the parent and the child.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;
	/*
	 * Prevent our resource set associations from being changed
	 * during fork.
	 */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Create a child proc struct.  Place a VN_HOLD on appropriate vnodes.
	 */
	if (getproc(&cp, 0) < 0) {
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);
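
	/*
	 * Note: in the vfork() case below, the child borrows the parent's
	 * address space outright rather than getting a copy via as_dup().
	 * That is why the parent must remain stopped in vfwait() until the
	 * child gives the address space back through relvm() on exec() or
	 * exit(); see relvm() and vfwait() at the bottom of this file.
	 */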

	/*
	 * Assign an address space to child
	 */
	if (isvfork) {
		/*
		 * Clear any watched areas and remember the
		 * watched pages for restoring in vfwait().
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as_clearwatch(as);
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as, &as->a_lock);
		}
		cp->p_as = as;
		cp->p_flag |= SVFORK;
	} else {
		/*
		 * We need to hold P_PR_LOCK until the address space has
		 * been duplicated and we've had a chance to remove from the
		 * child any DTrace probes that were in the parent.  Holding
		 * P_PR_LOCK prevents any new probes from being added and any
		 * extant probes from being removed.
		 */
		mutex_enter(&p->p_lock);
		sprlock_proc(p);
		p->p_flag |= SFORKING;
		mutex_exit(&p->p_lock);

		error = as_dup(p->p_as, &cp->p_as);
		if (error != 0) {
			fork_fail(cp);
			mutex_enter(&pidlock);
			orphpp = &p->p_orphan;
			while (*orphpp != cp)
				orphpp = &(*orphpp)->p_nextorph;
			*orphpp = cp->p_nextorph;
			if (p->p_child == cp)
				p->p_child = cp->p_sibling;
			if (cp->p_sibling)
				cp->p_sibling->p_psibling = cp->p_psibling;
			if (cp->p_psibling)
				cp->p_psibling->p_sibling = cp->p_sibling;
			mutex_enter(&cp->p_lock);
			tk = cp->p_task;
			task_detach(cp);
			ASSERT(cp->p_pool->pool_ref > 0);
			atomic_add_32(&cp->p_pool->pool_ref, -1);
			mutex_exit(&cp->p_lock);
			pid_exit(cp);
			mutex_exit(&pidlock);
			task_rele(tk);

			mutex_enter(&p->p_lock);
			p->p_flag &= ~SFORKING;
			pool_barrier_exit();
			continuelwps(p);
			sprunlock(p);
			/*
			 * Preserve ENOMEM error condition but
			 * map all others to EAGAIN.
			 */
			error = (error == ENOMEM) ? ENOMEM : EAGAIN;
			goto forkerr;
		}
		cp->p_as->a_proc = cp;

		/* Duplicate parent's shared memory */
		if (p->p_segacct)
			shmfork(p, cp);

		/*
		 * Remove all DTrace tracepoints from the child process. We
		 * need to do this _before_ duplicating USDT providers since
		 * any associated probes may be immediately enabled.
		 */
		if (p->p_dtrace_count > 0)
			dtrace_fasttrap_fork(p, cp);

		/*
		 * Duplicate any helper actions and providers.  The SFORKING
		 * flag set above informs the code that enables USDT probes
		 * that sprlock() may fail because the child is being forked.
		 */
		if (p->p_dtrace_helpers != NULL) {
			ASSERT(dtrace_helpers_fork != NULL);
			(*dtrace_helpers_fork)(p, cp);
		}

		mutex_enter(&p->p_lock);
		p->p_flag &= ~SFORKING;
		sprunlock(p);
	}

	/*
	 * Duplicate parent's resource controls.
	 */
	dup_set = rctl_set_create();
	for (;;) {
		dup_gp = rctl_set_dup_prealloc(p->p_rctls);
		mutex_enter(&p->p_rctls->rcs_lock);
		if (rctl_set_dup_ready(p->p_rctls, dup_gp))
			break;
		mutex_exit(&p->p_rctls->rcs_lock);
		rctl_prealloc_destroy(dup_gp);
	}
	e.rcep_p.proc = cp;
	e.rcep_t = RCENTITY_PROCESS;
	cp->p_rctls = rctl_set_dup(p->p_rctls, p, cp, &e, dup_set, dup_gp,
	    RCD_DUP | RCD_CALLBACK);
	mutex_exit(&p->p_rctls->rcs_lock);

	rctl_prealloc_destroy(dup_gp);

	/*
	 * Allocate the child's lwp directory and lwpid hash table.
	 */
	if (isfork1)
		cp->p_lwpdir_sz = 2;
	else
		cp->p_lwpdir_sz = p->p_lwpdir_sz;
	cp->p_lwpdir = cp->p_lwpfree = ldp =
	    kmem_zalloc(cp->p_lwpdir_sz * sizeof (lwpdir_t), KM_SLEEP);
	for (i = 1; i < cp->p_lwpdir_sz; i++, ldp++)
		ldp->ld_next = ldp + 1;
	cp->p_tidhash_sz = (cp->p_lwpdir_sz + 2) / 2;
	cp->p_tidhash =
	    kmem_zalloc(cp->p_tidhash_sz * sizeof (lwpdir_t *), KM_SLEEP);
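
	/*
	 * Note: a fork1() child starts with the minimal two-entry lwp
	 * directory allocated above because only the calling lwp will be
	 * duplicated; for forkall() the directory is sized to match the
	 * parent's so that every lwp (including zombie lwps) can be
	 * replicated below.
	 */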

	/*
	 * Duplicate parent's lwps.
	 * Mutual exclusion is not needed because the process is
	 * in the hold state and only the current lwp is running.
	 */
	klgrpset_clear(cp->p_lgrpset);
	if (isfork1) {
		clone = forklwp(ttolwp(curthread), cp, curthread->t_tid);
		if (clone == NULL)
			goto forklwperr;
		/*
		 * Inherit only the lwp_wait()able flag.
		 * Daemon threads should not call fork1(), but oh well...
		 */
		lwptot(clone)->t_proc_flag |=
		    (curthread->t_proc_flag & TP_TWAIT);
	} else {
		/* this is forkall(), no one can be in lwp_wait() */
		ASSERT(p->p_lwpwait == 0 && p->p_lwpdwait == 0);
		/* for each entry in the parent's lwp directory... */
		for (i = 0, ldp = p->p_lwpdir; i < p->p_lwpdir_sz; i++, ldp++) {
			klwp_t *clwp;
			kthread_t *ct;

			if ((lep = ldp->ld_entry) == NULL)
				continue;

			if ((t = lep->le_thread) != NULL) {
				clwp = forklwp(ttolwp(t), cp, t->t_tid);
				if (clwp == NULL)
					goto forklwperr;
				ct = lwptot(clwp);
				/*
				 * Inherit lwp_wait()able and daemon flags.
				 */
				ct->t_proc_flag |=
				    (t->t_proc_flag & (TP_TWAIT|TP_DAEMON));
				/*
				 * Keep track of the clone of curthread to
				 * post return values through lwp_setrval().
				 * Mark other threads for special treatment
				 * by lwp_rtt() / post_syscall().
				 */
				if (t == curthread)
					clone = clwp;
				else
					ct->t_flag |= T_FORKALL;
			} else {
				/*
				 * Replicate zombie lwps in the child.
				 */
				clep = kmem_zalloc(sizeof (*clep), KM_SLEEP);
				clep->le_lwpid = lep->le_lwpid;
				clep->le_start = lep->le_start;
				lwp_hash_in(cp, clep);
			}
		}
	}

	/*
	 * Put new process in the parent's process contract, or put it
	 * in a new one if there is an active process template.  Send a
	 * fork event (if requested) to whatever contract the child is
	 * a member of.  Fails if the parent has been SIGKILLed.
	 */
	if (contract_process_fork(NULL, cp, p, B_TRUE) == NULL)
		goto forklwperr;

	/*
	 * No fork failures occur beyond this point.
	 */

	cp->p_lwpid = p->p_lwpid;
	if (!isfork1) {
		cp->p_lwpdaemon = p->p_lwpdaemon;
		cp->p_zombcnt = p->p_zombcnt;
		/*
		 * If the parent's lwp ids have wrapped around, so have the
		 * child's.
		 */
		cp->p_flag |= p->p_flag & SLWPWRAP;
	}

	mutex_enter(&p->p_lock);
	corectl_path_hold(cp->p_corefile = p->p_corefile);
	corectl_content_hold(cp->p_content = p->p_content);
	mutex_exit(&p->p_lock);

	/*
	 * Duplicate process context ops, if any.
	 */
	if (p->p_pctx)
		forkpctx(p, cp);

#ifdef __sparc
	utrap_dup(p, cp);
#endif
	/*
	 * If the child process has been marked to stop on exit
	 * from this fork, arrange for all other lwps to stop in
	 * sympathy with the active lwp.
	 */
	if (PTOU(cp)->u_systrap &&
	    prismember(&PTOU(cp)->u_exitmask, curthread->t_sysnum)) {
		mutex_enter(&cp->p_lock);
		t = cp->p_tlist;
		do {
			t->t_proc_flag |= TP_PRSTOP;
			aston(t);	/* so TP_PRSTOP will be seen */
		} while ((t = t->t_forw) != cp->p_tlist);
		mutex_exit(&cp->p_lock);
	}
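
	/*
	 * Note: setting TP_PRSTOP and posting an AST with aston(), as done
	 * above for the child and below for the parent, presumably forces
	 * each lwp through its syscall-exit / trap-return path, where the
	 * /proc stop request is honored (compare the lwp_rtt() /
	 * post_syscall() treatment mentioned earlier for T_FORKALL).
	 */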

	/*
	 * If the parent process has been marked to stop on exit
	 * from this fork, and its asynchronous-stop flag has not
	 * been set, arrange for all other lwps to stop before
	 * they return back to user level.
	 */
	if (!(p->p_proc_flag & P_PR_ASYNC) && PTOU(p)->u_systrap &&
	    prismember(&PTOU(p)->u_exitmask, curthread->t_sysnum)) {
		mutex_enter(&p->p_lock);
		t = p->p_tlist;
		do {
			t->t_proc_flag |= TP_PRSTOP;
			aston(t);	/* so TP_PRSTOP will be seen */
		} while ((t = t->t_forw) != p->p_tlist);
		mutex_exit(&p->p_lock);
	}

	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwp_setrval(clone, p->p_pid, 1);
	else
		lwp_setrval(clone, p->p_pid, 1);

	/* set return values for parent */
	r.r_val1 = (int)cp->p_pid;
	r.r_val2 = 0;

	/*
	 * pool_barrier_exit() can now be called because the child process has:
	 * - all identifying features cloned or set (p_pid, p_task, p_pool)
	 * - all resource sets associated (p_tlist->*->t_cpupart, p_as->a_mset)
	 * - any other fields set which are used in resource set binding.
	 */
	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	mutex_exit(&p->p_lock);

	mutex_enter(&pidlock);
	mutex_enter(&cp->p_lock);

	/*
	 * Set flags telling the child what (not) to do on exit.
	 */
	if (flags & FORK_NOSIGCHLD)
		cp->p_pidflag |= CLDNOSIGCHLD;
	if (flags & FORK_WAITPID)
		cp->p_pidflag |= CLDWAITPID;

	/*
	 * Now that there are lwps and threads attached, add the new
	 * process to the process group.
	 */
	pgjoin(cp, p->p_pgidp);
	cp->p_stat = SRUN;
	/*
	 * We are now done with all the lwps in the child process.
	 */
	t = cp->p_tlist;
	do {
		/*
		 * Set the lwp_suspend()ed lwps running.
		 * They will suspend properly at syscall exit.
		 */
		if (t->t_proc_flag & TP_HOLDLWP)
			lwp_create_done(t);
		else {
			/* set TS_CREATE to allow continuelwps() to work */
			thread_lock(t);
			ASSERT(t->t_state == TS_STOPPED &&
			    !(t->t_schedflag & (TS_CREATE|TS_CSTART)));
			t->t_schedflag |= TS_CREATE;
			thread_unlock(t);
		}
	} while ((t = t->t_forw) != cp->p_tlist);
	mutex_exit(&cp->p_lock);
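
	/*
	 * Note: the child is fully formed and runnable at this point; the
	 * two branches below differ mainly in bookkeeping.  For vfork()
	 * the parent marks itself SVFPARENT|SVFWAIT and will block in
	 * vfwait() on the way out of the syscall (t_post_sys was set in
	 * vfork()/forksys()), while for fork()/forkall() CL_FORKRET()
	 * gives the scheduling class a chance to decide which of parent
	 * and child runs first.
	 */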

	if (isvfork) {
		CPU_STATS_ADDQ(CPU, sys, sysvfork, 1);
		mutex_enter(&p->p_lock);
		p->p_flag |= (SVFPARENT | SVFWAIT);
		DTRACE_PROC1(create, proc_t *, cp);
		cv_broadcast(&pr_pid_cv[p->p_slot]);	/* inform /proc */
		mutex_exit(&p->p_lock);
		/*
		 * Grab child's p_lock before dropping pidlock to ensure
		 * the process will not disappear before we set it running.
		 */
		mutex_enter(&cp->p_lock);
		mutex_exit(&pidlock);
		sigdefault(cp);
		continuelwps(cp);
		mutex_exit(&cp->p_lock);
	} else {
		CPU_STATS_ADDQ(CPU, sys, sysfork, 1);
		DTRACE_PROC1(create, proc_t *, cp);
		/*
		 * It is CL_FORKRET's job to drop pidlock.
		 * If we do it here, the process could be set running
		 * and disappear before CL_FORKRET() is called.
		 */
		CL_FORKRET(curthread, cp->p_tlist);
		ASSERT(MUTEX_NOT_HELD(&pidlock));
	}

	return (r.r_vals);

forklwperr:
	if (isvfork) {
		if (avl_numnodes(&p->p_wpage) != 0) {
			/* restore watchpoints to parent */
			as = p->p_as;
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as->a_wpage = p->p_wpage;
			avl_create(&p->p_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			as_setwatch(as);
			AS_LOCK_EXIT(as, &as->a_lock);
		}
	} else {
		if (cp->p_segacct)
			shmexit(cp);
		as = cp->p_as;
		cp->p_as = &kas;
		as_free(as);
	}

	if (cp->p_lwpdir) {
		for (i = 0, ldp = cp->p_lwpdir; i < cp->p_lwpdir_sz; i++, ldp++)
			if ((lep = ldp->ld_entry) != NULL)
				kmem_free(lep, sizeof (*lep));
		kmem_free(cp->p_lwpdir,
		    cp->p_lwpdir_sz * sizeof (*cp->p_lwpdir));
	}
	cp->p_lwpdir = NULL;
	cp->p_lwpfree = NULL;
	cp->p_lwpdir_sz = 0;

	if (cp->p_tidhash)
		kmem_free(cp->p_tidhash,
		    cp->p_tidhash_sz * sizeof (*cp->p_tidhash));
	cp->p_tidhash = NULL;
	cp->p_tidhash_sz = 0;

	forklwp_fail(cp);
	fork_fail(cp);
	rctl_set_free(cp->p_rctls);
	mutex_enter(&pidlock);

	/*
	 * Detach failed child from task.
	 */
	mutex_enter(&cp->p_lock);
	tk = cp->p_task;
	task_detach(cp);
	ASSERT(cp->p_pool->pool_ref > 0);
	atomic_add_32(&cp->p_pool->pool_ref, -1);
	mutex_exit(&cp->p_lock);

	orphpp = &p->p_orphan;
	while (*orphpp != cp)
		orphpp = &(*orphpp)->p_nextorph;
	*orphpp = cp->p_nextorph;
	if (p->p_child == cp)
		p->p_child = cp->p_sibling;
	if (cp->p_sibling)
		cp->p_sibling->p_psibling = cp->p_psibling;
	if (cp->p_psibling)
		cp->p_psibling->p_sibling = cp->p_sibling;
	pid_exit(cp);
	mutex_exit(&pidlock);

	task_rele(tk);

	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	continuelwps(p);
	mutex_exit(&p->p_lock);
	error = EAGAIN;
forkerr:
	return ((int64_t)set_errno(error));
}

/*
 * Free allocated resources from getproc() if a fork failed.
 */
static void
fork_fail(proc_t *cp)
{
	uf_info_t *fip = P_FINFO(cp);

	fcnt_add(fip, -1);
	sigdelq(cp, NULL, 0);

	mutex_enter(&pidlock);
	upcount_dec(crgetruid(cp->p_cred), crgetzoneid(cp->p_cred));
	mutex_exit(&pidlock);

	/*
	 * single threaded, so no locking needed here
	 */
	crfree(cp->p_cred);

	kmem_free(fip->fi_list, fip->fi_nfiles * sizeof (uf_entry_t));

	VN_RELE(PTOU(curproc)->u_cdir);
	if (PTOU(curproc)->u_rdir)
		VN_RELE(PTOU(curproc)->u_rdir);
	if (cp->p_exec)
		VN_RELE(cp->p_exec);
	if (cp->p_execdir)
		VN_RELE(cp->p_execdir);
	if (PTOU(curproc)->u_cwd)
		refstr_rele(PTOU(curproc)->u_cwd);
}
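
/*
 * Note: fork_fail() above releases exactly the holds that getproc()
 * below acquires for a new child: the file-table reference from
 * fcnt_add(), the per-user process count from upcount_inc(), the
 * credential hold from crhold(), and the vnode/refstr holds on
 * u_cdir, u_rdir, p_exec, p_execdir and u_cwd.
 */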

/*
 * Clean up the lwps already created for this child process.
 * The fork failed while duplicating all the lwps of the parent
 * and those lwps already created must be freed.
 * This process is invisible to the rest of the system,
 * so we don't need to hold p->p_lock to protect the list.
 */
static void
forklwp_fail(proc_t *p)
{
	kthread_t *t;
	task_t *tk;

	while ((t = p->p_tlist) != NULL) {
		/*
		 * First remove the lwp from the process's p_tlist.
		 */
		if (t != t->t_forw)
			p->p_tlist = t->t_forw;
		else
			p->p_tlist = NULL;
		p->p_lwpcnt--;
		t->t_forw->t_back = t->t_back;
		t->t_back->t_forw = t->t_forw;

		tk = p->p_task;
		mutex_enter(&p->p_zone->zone_nlwps_lock);
		tk->tk_nlwps--;
		tk->tk_proj->kpj_nlwps--;
		p->p_zone->zone_nlwps--;
		mutex_exit(&p->p_zone->zone_nlwps_lock);

		ASSERT(t->t_schedctl == NULL);

		if (t->t_door != NULL) {
			kmem_free(t->t_door, sizeof (door_data_t));
			t->t_door = NULL;
		}
		lwp_ctmpl_clear(ttolwp(t));

		/*
		 * Remove the thread from the all threads list.
		 * We need to hold pidlock for this.
		 */
		mutex_enter(&pidlock);
		t->t_next->t_prev = t->t_prev;
		t->t_prev->t_next = t->t_next;
		CL_EXIT(t);	/* tell the scheduler that we're exiting */
		cv_broadcast(&t->t_joincv);	/* tell anyone in thread_join */
		mutex_exit(&pidlock);

		/*
		 * Let the lgroup load averages know that this thread isn't
		 * going to show up (i.e. un-do what was done on behalf of
		 * this thread by the earlier lgrp_move_thread()).
		 */
		kpreempt_disable();
		lgrp_move_thread(t, NULL, 1);
		kpreempt_enable();

		/*
		 * The thread was created TS_STOPPED.
		 * We change it to TS_FREE to avoid an
		 * ASSERT() panic in thread_free().
		 */
		t->t_state = TS_FREE;
		thread_rele(t);
		thread_free(t);
	}
}

extern struct as kas;
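
/*
 * Note: newproc() below is the kernel-internal analogue of fork for
 * creating kernel processes.  A cid of syscid creates a system-class
 * process (SNOWAIT, no /proc tracing inheritance, no process
 * contract); any other cid presumably creates a user-visible process
 * in a task of its own, e.g. when the kernel starts init.
 */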

/*
 * fork a kernel process.
 */
int
newproc(void (*pc)(), caddr_t arg, id_t cid, int pri, struct contract **ct)
{
	proc_t *p;
	struct user *up;
	klwp_t *lwp;
	cont_process_t *ctp = NULL;
	rctl_entity_p_t e;

	ASSERT(!(cid == syscid && ct != NULL));
	if (cid == syscid) {
		rctl_alloc_gp_t *init_gp;
		rctl_set_t *init_set;

		if (getproc(&p, 1) < 0)
			return (EAGAIN);

		p->p_flag |= SNOWAIT;
		p->p_exec = NULL;
		p->p_execdir = NULL;

		init_set = rctl_set_create();
		init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS);

		/*
		 * kernel processes do not inherit /proc tracing flags.
		 */
		sigemptyset(&p->p_sigmask);
		premptyset(&p->p_fltmask);
		up = PTOU(p);
		up->u_systrap = 0;
		premptyset(&(up->u_entrymask));
		premptyset(&(up->u_exitmask));
		mutex_enter(&p->p_lock);
		e.rcep_p.proc = p;
		e.rcep_t = RCENTITY_PROCESS;
		p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set,
		    init_gp);
		mutex_exit(&p->p_lock);

		rctl_prealloc_destroy(init_gp);
	} else {
		rctl_alloc_gp_t *init_gp, *default_gp;
		rctl_set_t *init_set;
		task_t *tk, *tk_old;

		if (getproc(&p, 0) < 0)
			return (EAGAIN);
		/*
		 * init creates a new task, distinct from the task
		 * containing kernel "processes".
		 */
		tk = task_create(0, p->p_zone);
		mutex_enter(&tk->tk_zone->zone_nlwps_lock);
		tk->tk_proj->kpj_ntasks++;
		mutex_exit(&tk->tk_zone->zone_nlwps_lock);

		default_gp = rctl_rlimit_set_prealloc(RLIM_NLIMITS);
		init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS);
		init_set = rctl_set_create();

		mutex_enter(&pidlock);
		mutex_enter(&p->p_lock);
		tk_old = p->p_task;	/* switch to new task */

		task_detach(p);
		task_begin(tk, p);
		mutex_exit(&pidlock);

		e.rcep_p.proc = p;
		e.rcep_t = RCENTITY_PROCESS;
		p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set,
		    init_gp);
		rctlproc_default_init(p, default_gp);
		mutex_exit(&p->p_lock);

		task_rele(tk_old);
		rctl_prealloc_destroy(default_gp);
		rctl_prealloc_destroy(init_gp);
	}

	p->p_as = &kas;

	if ((lwp = lwp_create(pc, arg, 0, p, TS_STOPPED, pri,
	    &curthread->t_hold, cid, 1)) == NULL) {
		task_t *tk;
		fork_fail(p);
		mutex_enter(&pidlock);
		mutex_enter(&p->p_lock);
		tk = p->p_task;
		task_detach(p);
		ASSERT(p->p_pool->pool_ref > 0);
		atomic_add_32(&p->p_pool->pool_ref, -1);
		mutex_exit(&p->p_lock);
		pid_exit(p);
		mutex_exit(&pidlock);
		task_rele(tk);

		return (EAGAIN);
	}

	if (cid != syscid) {
		ctp = contract_process_fork(sys_process_tmpl, p, curproc,
		    B_FALSE);
		ASSERT(ctp != NULL);
		if (ct != NULL)
			*ct = &ctp->conp_contract;
	}

	p->p_lwpid = 1;
	mutex_enter(&pidlock);
	pgjoin(p, curproc->p_pgidp);
	p->p_stat = SRUN;
	mutex_enter(&p->p_lock);
	lwptot(lwp)->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(lwptot(lwp));
	mutex_exit(&p->p_lock);
	mutex_exit(&pidlock);
	return (0);
}

/*
 * create a child proc struct.
 */
static int
getproc(proc_t **cpp, int kernel)
{
	proc_t *pp, *cp;
	pid_t newpid;
	struct user *uarea;
	extern uint_t nproc;
	struct cred *cr;
	uid_t ruid;
	zoneid_t zoneid;

	if (!page_mem_avail(tune.t_minarmem))
		return (-1);
	if (zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)
		return (-1);	/* no point in starting new processes */

	pp = curproc;
	cp = kmem_cache_alloc(process_cache, KM_SLEEP);
	bzero(cp, sizeof (proc_t));

	/*
	 * Make proc entry for child process
	 */
	mutex_init(&cp->p_splock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cp->p_crlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cp->p_pflock, NULL, MUTEX_DEFAULT, NULL);
#if defined(__x86)
	mutex_init(&cp->p_ldtlock, NULL, MUTEX_DEFAULT, NULL);
#endif
	mutex_init(&cp->p_maplock, NULL, MUTEX_DEFAULT, NULL);
	cp->p_stat = SIDL;
	cp->p_mstart = gethrtime();
	/*
	 * p_zone must be set before we call pid_allocate since the process
	 * will be visible after that and code such as prfind_zone will
	 * look at the p_zone field.
	 */
	cp->p_zone = pp->p_zone;

	if ((newpid = pid_allocate(cp, PID_ALLOC_PROC)) == -1) {
		if (nproc == v.v_proc) {
			CPU_STATS_ADDQ(CPU, sys, procovf, 1);
			cmn_err(CE_WARN, "out of processes");
		}
		goto bad;
	}
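
	/*
	 * Note: once pid_allocate() has succeeded the child has a pid slot
	 * and is visible through /proc (see the p_zone comment above), so
	 * failure paths from here on must release the pid again: the "bad"
	 * label below uses proc_entry_free()/pid_rele(), and callers such
	 * as cfork() and newproc() use pid_exit().
	 */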

	/*
	 * If not privileged make sure that this user hasn't exceeded
	 * v.v_maxup processes, and that users collectively haven't
	 * exceeded v.v_maxupttl processes.
	 */
	mutex_enter(&pidlock);
	ASSERT(nproc < v.v_proc);	/* otherwise how'd we get our pid? */
	cr = CRED();
	ruid = crgetruid(cr);
	zoneid = crgetzoneid(cr);
	if (nproc >= v.v_maxup && 	/* short-circuit; usually false */
	    (nproc >= v.v_maxupttl ||
	    upcount_get(ruid, zoneid) >= v.v_maxup) &&
	    secpolicy_newproc(cr) != 0) {
		mutex_exit(&pidlock);
		zcmn_err(zoneid, CE_NOTE,
		    "out of per-user processes for uid %d", ruid);
		goto bad;
	}

	/*
	 * Everything is cool, put the new proc on the active process list.
	 * It is already on the pid list and in /proc.
	 * Increment the per uid process count (upcount).
	 */
	nproc++;
	upcount_inc(ruid, zoneid);

	cp->p_next = practive;
	practive->p_prev = cp;
	practive = cp;

	cp->p_ignore = pp->p_ignore;
	cp->p_siginfo = pp->p_siginfo;
	cp->p_flag = pp->p_flag & (SJCTL|SNOWAIT|SNOCD);
	cp->p_sessp = pp->p_sessp;
	sess_hold(pp);
	cp->p_exec = pp->p_exec;
	cp->p_execdir = pp->p_execdir;
	cp->p_brand = pp->p_brand;
	if (PROC_IS_BRANDED(pp))
		BROP(pp)->b_copy_procdata(cp, pp);

	cp->p_bssbase = pp->p_bssbase;
	cp->p_brkbase = pp->p_brkbase;
	cp->p_brksize = pp->p_brksize;
	cp->p_brkpageszc = pp->p_brkpageszc;
	cp->p_stksize = pp->p_stksize;
	cp->p_stkpageszc = pp->p_stkpageszc;
	cp->p_stkprot = pp->p_stkprot;
	cp->p_datprot = pp->p_datprot;
	cp->p_usrstack = pp->p_usrstack;
	cp->p_model = pp->p_model;
	cp->p_ppid = pp->p_pid;
	cp->p_ancpid = pp->p_pid;
	cp->p_portcnt = pp->p_portcnt;

	/*
	 * Initialize watchpoint structures
	 */
	avl_create(&cp->p_warea, wa_compare, sizeof (struct watched_area),
	    offsetof(struct watched_area, wa_link));

	/*
	 * Initialize immediate resource control values.
	 */
	cp->p_stk_ctl = pp->p_stk_ctl;
	cp->p_fsz_ctl = pp->p_fsz_ctl;
	cp->p_vmem_ctl = pp->p_vmem_ctl;
	cp->p_fno_ctl = pp->p_fno_ctl;

	/*
	 * Link up to parent-child-sibling chain.  No need to lock
	 * in general since only a call to freeproc() (done by the
	 * same parent as newproc()) diddles with the child chain.
	 */
	cp->p_sibling = pp->p_child;
	if (pp->p_child)
		pp->p_child->p_psibling = cp;

	cp->p_parent = pp;
	pp->p_child = cp;

	cp->p_child_ns = NULL;
	cp->p_sibling_ns = NULL;

	cp->p_nextorph = pp->p_orphan;
	cp->p_nextofkin = pp;
	pp->p_orphan = cp;

	/*
	 * Inherit profiling state; do not inherit REALPROF profiling state.
	 */
	cp->p_prof = pp->p_prof;
	cp->p_rprof_cyclic = CYCLIC_NONE;

	/*
	 * Inherit pool pointer from the parent.  Kernel processes are
	 * always bound to the default pool.
	 */
	mutex_enter(&pp->p_lock);
	if (kernel) {
		cp->p_pool = pool_default;
		cp->p_flag |= SSYS;
	} else {
		cp->p_pool = pp->p_pool;
	}
	atomic_add_32(&cp->p_pool->pool_ref, 1);
	mutex_exit(&pp->p_lock);
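
	/*
	 * Note: the pool reference taken just above is dropped again with
	 * a matching atomic_add_32(&cp->p_pool->pool_ref, -1) on the fork
	 * failure paths in cfork() and newproc(), just before pid_exit().
	 */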

	/*
	 * Add the child process to the current task.  Kernel processes
	 * are always attached to task0.
	 */
	mutex_enter(&cp->p_lock);
	if (kernel)
		task_attach(task0p, cp);
	else
		task_attach(pp->p_task, cp);
	mutex_exit(&cp->p_lock);
	mutex_exit(&pidlock);

	avl_create(&cp->p_ct_held, contract_compar, sizeof (contract_t),
	    offsetof(contract_t, ct_ctlist));

	/*
	 * Duplicate any audit information kept in the process table
	 */
#ifdef C2_AUDIT
	if (audit_active)	/* copy audit data to cp */
		audit_newproc(cp);
#endif

	crhold(cp->p_cred = cr);

	/*
	 * Bump up the counts on the file structures pointed at by the
	 * parent's file table since the child will point at them too.
	 */
	fcnt_add(P_FINFO(pp), 1);

	VN_HOLD(PTOU(pp)->u_cdir);
	if (PTOU(pp)->u_rdir)
		VN_HOLD(PTOU(pp)->u_rdir);
	if (PTOU(pp)->u_cwd)
		refstr_hold(PTOU(pp)->u_cwd);

	/*
	 * copy the parent's uarea.
	 */
	uarea = PTOU(cp);
	bcopy(PTOU(pp), uarea, sizeof (*uarea));
	flist_fork(P_FINFO(pp), P_FINFO(cp));

	gethrestime(&uarea->u_start);
	uarea->u_ticks = lbolt;
	uarea->u_mem = rm_asrss(pp->p_as);
	uarea->u_acflag = AFORK;

	/*
	 * If inherit-on-fork, copy /proc tracing flags to child.
	 */
	if ((pp->p_proc_flag & P_PR_FORK) != 0) {
		cp->p_proc_flag |= pp->p_proc_flag & (P_PR_TRACE|P_PR_FORK);
		cp->p_sigmask = pp->p_sigmask;
		cp->p_fltmask = pp->p_fltmask;
	} else {
		sigemptyset(&cp->p_sigmask);
		premptyset(&cp->p_fltmask);
		uarea->u_systrap = 0;
		premptyset(&uarea->u_entrymask);
		premptyset(&uarea->u_exitmask);
	}
	/*
	 * If microstate accounting is being inherited, mark child
	 */
	if ((pp->p_flag & SMSFORK) != 0)
		cp->p_flag |= pp->p_flag & (SMSFORK|SMSACCT);

	/*
	 * Inherit fixalignment flag from the parent
	 */
	cp->p_fixalignment = pp->p_fixalignment;

	if (cp->p_exec)
		VN_HOLD(cp->p_exec);
	if (cp->p_execdir)
		VN_HOLD(cp->p_execdir);
	*cpp = cp;
	return (0);

bad:
	ASSERT(MUTEX_NOT_HELD(&pidlock));

	mutex_destroy(&cp->p_crlock);
	mutex_destroy(&cp->p_pflock);
#if defined(__x86)
	mutex_destroy(&cp->p_ldtlock);
#endif
	if (newpid != -1) {
		proc_entry_free(cp->p_pidp);
		(void) pid_rele(cp->p_pidp);
	}
	kmem_cache_free(process_cache, cp);

	/*
	 * We most likely got into this situation because some process is
	 * forking out of control.  As punishment, put it to sleep for a
	 * bit so it can't eat the machine alive.  Sleep interval is chosen
	 * to allow no more than one fork failure per cpu per clock tick
	 * on average (yes, I just made this up).  This has two desirable
	 * properties: (1) it sets a constant limit on the fork failure
	 * rate, and (2) the busier the system is, the harsher the penalty
	 * for abusing it becomes.
	 */
	INCR_COUNT(&fork_fail_pending, &pidlock);
	delay(fork_fail_pending / ncpus + 1);
	DECR_COUNT(&fork_fail_pending, &pidlock);

	return (-1);	/* out of memory or proc slots */
}
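
/*
 * Note on the vfork() handshake implemented below: the child runs
 * with SVFORK set, borrowing the parent's address space, while the
 * parent blocks in vfwait() with SVFWAIT set.  When the child calls
 * relvm() from exec() or exit(), it detaches from the borrowed
 * address space, clears SVFORK, restores the parent's watched pages,
 * and signals the parent's p_cv to let it continue.
 */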

/*
 * Release virtual memory.
 * In the case of vfork(), the child was given exclusive access to its
 * parent's address space.  The parent is waiting in vfwait() for the
 * child to release its exclusive claim via relvm().
 */
void
relvm()
{
	proc_t *p = curproc;

	ASSERT((unsigned)p->p_lwpcnt <= 1);

	prrelvm();	/* inform /proc */

	if (p->p_flag & SVFORK) {
		proc_t *pp = p->p_parent;
		/*
		 * The child process is either exec'ing or exit'ing.
		 * The child is now separated from the parent's address
		 * space.  The parent process is made dispatchable.
		 *
		 * This is a delicate locking maneuver, involving
		 * both the parent's p_lock and the child's p_lock.
		 * As soon as the SVFORK flag is turned off, the
		 * parent is free to run, but it must not run until
		 * we wake it up using its p_cv because it might
		 * exit and we would be referencing invalid memory.
		 * Therefore, we hold the parent with its p_lock
		 * while protecting our p_flags with our own p_lock.
		 */
	try_again:
		mutex_enter(&p->p_lock);	/* grab child's lock first */
		prbarrier(p);	/* make sure /proc is blocked out */
		mutex_enter(&pp->p_lock);

		/*
		 * Check if parent is locked by /proc.
		 */
		if (pp->p_proc_flag & P_PR_LOCK) {
			/*
			 * Delay until /proc is done with the parent.
			 * We must drop our (the child's) p->p_lock, wait
			 * via prbarrier() on the parent, then start over.
			 */
			mutex_exit(&p->p_lock);
			prbarrier(pp);
			mutex_exit(&pp->p_lock);
			goto try_again;
		}
		p->p_flag &= ~SVFORK;
		kpreempt_disable();
		p->p_as = &kas;

		/*
		 * notify hat of change in thread's address space
		 */
		hat_thread_exit(curthread);
		kpreempt_enable();

		/*
		 * child sizes are copied back to parent because
		 * child may have grown.
		 */
		pp->p_brkbase = p->p_brkbase;
		pp->p_brksize = p->p_brksize;
		pp->p_stksize = p->p_stksize;
		/*
		 * The parent is no longer waiting for the vfork()d child.
		 * Restore the parent's watched pages, if any.  This is
		 * safe because we know the parent is not locked by /proc
		 */
		pp->p_flag &= ~SVFWAIT;
		if (avl_numnodes(&pp->p_wpage) != 0) {
			pp->p_as->a_wpage = pp->p_wpage;
			avl_create(&pp->p_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
		}
		cv_signal(&pp->p_cv);
		mutex_exit(&pp->p_lock);
		mutex_exit(&p->p_lock);
	} else {
		if (p->p_as != &kas) {
			struct as *as;

			if (p->p_segacct)
				shmexit(p);

			/*
			 * We grab p_lock for the benefit of /proc
			 */
			kpreempt_disable();
			mutex_enter(&p->p_lock);
			prbarrier(p);	/* make sure /proc is blocked out */
			as = p->p_as;
			p->p_as = &kas;
			mutex_exit(&p->p_lock);

			/*
			 * notify hat of change in thread's address space
			 */
			hat_thread_exit(curthread);
			kpreempt_enable();

			as_free(as);
		}
	}
}

/*
 * Wait for child to exec or exit.
 * Called by parent of vfork'ed process.
 * See important comments in relvm(), above.
 */
void
vfwait(pid_t pid)
{
	int signalled = 0;
	proc_t *pp = ttoproc(curthread);
	proc_t *cp;

	/*
	 * Wait for child to exec or exit.
	 */
	for (;;) {
		mutex_enter(&pidlock);
		cp = prfind(pid);
		if (cp == NULL || cp->p_parent != pp) {
			/*
			 * Child has exit()ed.
			 */
			mutex_exit(&pidlock);
			break;
		}
		/*
		 * Grab the child's p_lock before releasing pidlock.
		 * Otherwise, the child could exit and we would be
		 * referencing invalid memory.
		 */
		mutex_enter(&cp->p_lock);
		mutex_exit(&pidlock);
		if (!(cp->p_flag & SVFORK)) {
			/*
			 * Child has exec()ed or is exit()ing.
			 */
			mutex_exit(&cp->p_lock);
			break;
		}
		mutex_enter(&pp->p_lock);
		mutex_exit(&cp->p_lock);
		/*
		 * We might be woken up spuriously from the cv_wait().
		 * We have to do the whole operation over again to be
		 * sure the child's SVFORK flag really is turned off.
		 * We cannot make reference to the child because it can
		 * exit before we return and we would be referencing
		 * invalid memory.
		 *
		 * Because this is potentially a very long-term wait,
		 * we call cv_wait_sig() (for its jobcontrol and /proc
		 * side-effects) unless there is a current signal, in
		 * which case we use cv_wait() because we cannot return
		 * from this function until the child has released the
		 * address space.  Calling cv_wait_sig() with a current
		 * signal would lead to an indefinite loop here because
		 * cv_wait_sig() returns immediately in this case.
		 */
		if (signalled)
			cv_wait(&pp->p_cv, &pp->p_lock);
		else
			signalled = !cv_wait_sig(&pp->p_cv, &pp->p_lock);
		mutex_exit(&pp->p_lock);
	}

	/* restore watchpoints to parent */
	if (pr_watch_active(pp)) {
		struct as *as = pp->p_as;
		AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		as_setwatch(as);
		AS_LOCK_EXIT(as, &as->a_lock);
	}

	mutex_enter(&pp->p_lock);
	prbarrier(pp);	/* barrier against /proc locking */
	pp->p_flag &= ~SVFPARENT;
	continuelwps(pp);
	mutex_exit(&pp->p_lock);
}