1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 31 #pragma ident "%Z%%M% %I% %E% SMI" 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/sysmacros.h> 36 #include <sys/signal.h> 37 #include <sys/cred.h> 38 #include <sys/policy.h> 39 #include <sys/user.h> 40 #include <sys/systm.h> 41 #include <sys/cpuvar.h> 42 #include <sys/vfs.h> 43 #include <sys/vnode.h> 44 #include <sys/file.h> 45 #include <sys/errno.h> 46 #include <sys/time.h> 47 #include <sys/proc.h> 48 #include <sys/cmn_err.h> 49 #include <sys/acct.h> 50 #include <sys/tuneable.h> 51 #include <sys/class.h> 52 #include <sys/kmem.h> 53 #include <sys/session.h> 54 #include <sys/ucontext.h> 55 #include <sys/stack.h> 56 #include <sys/procfs.h> 57 #include <sys/prsystm.h> 58 #include <sys/vmsystm.h> 59 #include <sys/vtrace.h> 60 #include <sys/debug.h> 61 #include <sys/shm_impl.h> 62 #include <sys/door_data.h> 63 #include <vm/as.h> 64 #include <vm/rm.h> 65 #include <c2/audit.h> 66 #include <sys/var.h> 67 #include <sys/schedctl.h> 68 #include <sys/utrap.h> 69 #include <sys/task.h> 70 #include <sys/resource.h> 71 #include <sys/cyclic.h> 72 #include <sys/lgrp.h> 73 #include <sys/rctl.h> 74 #include <sys/contract_impl.h> 75 #include <sys/contract/process_impl.h> 76 #include <sys/list.h> 77 #include <sys/dtrace.h> 78 #include <sys/pool.h> 79 #include <sys/zone.h> 80 #include <sys/sdt.h> 81 #include <sys/class.h> 82 #include <sys/corectl.h> 83 #include <sys/brand.h> 84 85 static int64_t cfork(int, int); 86 static int getproc(proc_t **, int); 87 static void fork_fail(proc_t *); 88 static void forklwp_fail(proc_t *); 89 90 int fork_fail_pending; 91 92 extern struct kmem_cache *process_cache; 93 94 /* 95 * forkall system call. 96 */ 97 int64_t 98 forkall(void) 99 { 100 return (cfork(0, 0)); 101 } 102 103 /* 104 * The parent is stopped until the child invokes relvm(). 105 */ 106 int64_t 107 vfork(void) 108 { 109 curthread->t_post_sys = 1; /* so vfwait() will be called */ 110 return (cfork(1, 1)); 111 } 112 113 /* 114 * fork1 system call 115 */ 116 int64_t 117 fork1(void) 118 { 119 return (cfork(0, 1)); 120 } 121 122 /* ARGSUSED */ 123 static int64_t 124 cfork(int isvfork, int isfork1) 125 { 126 proc_t *p = ttoproc(curthread); 127 struct as *as; 128 proc_t *cp, **orphpp; 129 klwp_t *clone; 130 kthread_t *t; 131 task_t *tk; 132 rval_t r; 133 int error; 134 int i; 135 rctl_set_t *dup_set; 136 rctl_alloc_gp_t *dup_gp; 137 rctl_entity_p_t e; 138 lwpdir_t *ldp; 139 lwpent_t *lep; 140 lwpent_t *clep; 141 142 /* 143 * fork is not supported for the /proc agent lwp. 144 */ 145 if (curthread == p->p_agenttp) { 146 error = ENOTSUP; 147 goto forkerr; 148 } 149 150 if ((error = secpolicy_basic_fork(CRED())) != 0) 151 goto forkerr; 152 153 /* 154 * If the calling lwp is doing a fork1() then the 155 * other lwps in this process are not duplicated and 156 * don't need to be held where their kernel stacks can be 157 * cloned. If doing forkall(), the process is held with 158 * SHOLDFORK, so that the lwps are at a point where their 159 * stacks can be copied which is on entry or exit from 160 * the kernel. 161 */ 162 if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) { 163 aston(curthread); 164 error = EINTR; 165 goto forkerr; 166 } 167 168 #if defined(__sparc) 169 /* 170 * Ensure that the user stack is fully constructed 171 * before creating the child process structure. 172 */ 173 (void) flush_user_windows_to_stack(NULL); 174 #endif 175 176 mutex_enter(&p->p_lock); 177 /* 178 * If this is vfork(), cancel any suspend request we might 179 * have gotten from some other thread via lwp_suspend(). 180 * Otherwise we could end up with a deadlock on return 181 * from the vfork() in both the parent and the child. 182 */ 183 if (isvfork) 184 curthread->t_proc_flag &= ~TP_HOLDLWP; 185 /* 186 * Prevent our resource set associations from being changed during fork. 187 */ 188 pool_barrier_enter(); 189 mutex_exit(&p->p_lock); 190 191 /* 192 * Create a child proc struct. Place a VN_HOLD on appropriate vnodes. 193 */ 194 if (getproc(&cp, 0) < 0) { 195 mutex_enter(&p->p_lock); 196 pool_barrier_exit(); 197 continuelwps(p); 198 mutex_exit(&p->p_lock); 199 error = EAGAIN; 200 goto forkerr; 201 } 202 203 TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p); 204 205 /* 206 * Assign an address space to child 207 */ 208 if (isvfork) { 209 /* 210 * Clear any watched areas and remember the 211 * watched pages for restoring in vfwait(). 212 */ 213 as = p->p_as; 214 if (avl_numnodes(&as->a_wpage) != 0) { 215 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); 216 as_clearwatch(as); 217 p->p_wpage = as->a_wpage; 218 avl_create(&as->a_wpage, wp_compare, 219 sizeof (struct watched_page), 220 offsetof(struct watched_page, wp_link)); 221 AS_LOCK_EXIT(as, &as->a_lock); 222 } 223 cp->p_as = as; 224 cp->p_flag |= SVFORK; 225 } else { 226 /* 227 * We need to hold P_PR_LOCK until the address space has 228 * been duplicated and we've had a chance to remove from the 229 * child any DTrace probes that were in the parent. Holding 230 * P_PR_LOCK prevents any new probes from being added and any 231 * extant probes from being removed. 232 */ 233 mutex_enter(&p->p_lock); 234 sprlock_proc(p); 235 p->p_flag |= SFORKING; 236 mutex_exit(&p->p_lock); 237 238 error = as_dup(p->p_as, &cp->p_as); 239 if (error != 0) { 240 fork_fail(cp); 241 mutex_enter(&pidlock); 242 orphpp = &p->p_orphan; 243 while (*orphpp != cp) 244 orphpp = &(*orphpp)->p_nextorph; 245 *orphpp = cp->p_nextorph; 246 if (p->p_child == cp) 247 p->p_child = cp->p_sibling; 248 if (cp->p_sibling) 249 cp->p_sibling->p_psibling = cp->p_psibling; 250 if (cp->p_psibling) 251 cp->p_psibling->p_sibling = cp->p_sibling; 252 mutex_enter(&cp->p_lock); 253 tk = cp->p_task; 254 task_detach(cp); 255 ASSERT(cp->p_pool->pool_ref > 0); 256 atomic_add_32(&cp->p_pool->pool_ref, -1); 257 mutex_exit(&cp->p_lock); 258 pid_exit(cp); 259 mutex_exit(&pidlock); 260 task_rele(tk); 261 262 mutex_enter(&p->p_lock); 263 p->p_flag &= ~SFORKING; 264 pool_barrier_exit(); 265 continuelwps(p); 266 sprunlock(p); 267 /* 268 * Preserve ENOMEM error condition but 269 * map all others to EAGAIN. 270 */ 271 error = (error == ENOMEM) ? ENOMEM : EAGAIN; 272 goto forkerr; 273 } 274 /* Duplicate parent's shared memory */ 275 if (p->p_segacct) 276 shmfork(p, cp); 277 278 /* 279 * Remove all DTrace tracepoints from the child process. We 280 * need to do this _before_ duplicating USDT providers since 281 * any associated probes may be immediately enabled. 282 */ 283 if (p->p_dtrace_count > 0) 284 dtrace_fasttrap_fork(p, cp); 285 286 /* 287 * Duplicate any helper actions and providers. The SFORKING 288 * we set above informs the code to enable USDT probes that 289 * sprlock() may fail because the child is being forked. 290 */ 291 if (p->p_dtrace_helpers != NULL) { 292 ASSERT(dtrace_helpers_fork != NULL); 293 (*dtrace_helpers_fork)(p, cp); 294 } 295 296 mutex_enter(&p->p_lock); 297 p->p_flag &= ~SFORKING; 298 sprunlock(p); 299 } 300 301 /* 302 * Duplicate parent's resource controls. 303 */ 304 dup_set = rctl_set_create(); 305 for (;;) { 306 dup_gp = rctl_set_dup_prealloc(p->p_rctls); 307 mutex_enter(&p->p_rctls->rcs_lock); 308 if (rctl_set_dup_ready(p->p_rctls, dup_gp)) 309 break; 310 mutex_exit(&p->p_rctls->rcs_lock); 311 rctl_prealloc_destroy(dup_gp); 312 } 313 e.rcep_p.proc = cp; 314 e.rcep_t = RCENTITY_PROCESS; 315 cp->p_rctls = rctl_set_dup(p->p_rctls, p, cp, &e, dup_set, dup_gp, 316 RCD_DUP | RCD_CALLBACK); 317 mutex_exit(&p->p_rctls->rcs_lock); 318 319 rctl_prealloc_destroy(dup_gp); 320 321 /* 322 * Allocate the child's lwp directory and lwpid hash table. 323 */ 324 if (isfork1) 325 cp->p_lwpdir_sz = 2; 326 else 327 cp->p_lwpdir_sz = p->p_lwpdir_sz; 328 cp->p_lwpdir = cp->p_lwpfree = ldp = 329 kmem_zalloc(cp->p_lwpdir_sz * sizeof (lwpdir_t), KM_SLEEP); 330 for (i = 1; i < cp->p_lwpdir_sz; i++, ldp++) 331 ldp->ld_next = ldp + 1; 332 cp->p_tidhash_sz = (cp->p_lwpdir_sz + 2) / 2; 333 cp->p_tidhash = 334 kmem_zalloc(cp->p_tidhash_sz * sizeof (lwpdir_t *), KM_SLEEP); 335 336 /* 337 * Duplicate parent's lwps. 338 * Mutual exclusion is not needed because the process is 339 * in the hold state and only the current lwp is running. 340 */ 341 klgrpset_clear(cp->p_lgrpset); 342 if (isfork1) { 343 clone = forklwp(ttolwp(curthread), cp, curthread->t_tid); 344 if (clone == NULL) 345 goto forklwperr; 346 /* 347 * Inherit only the lwp_wait()able flag, 348 * Daemon threads should not call fork1(), but oh well... 349 */ 350 lwptot(clone)->t_proc_flag |= 351 (curthread->t_proc_flag & TP_TWAIT); 352 } else { 353 /* this is forkall(), no one can be in lwp_wait() */ 354 ASSERT(p->p_lwpwait == 0 && p->p_lwpdwait == 0); 355 /* for each entry in the parent's lwp directory... */ 356 for (i = 0, ldp = p->p_lwpdir; i < p->p_lwpdir_sz; i++, ldp++) { 357 klwp_t *clwp; 358 kthread_t *ct; 359 360 if ((lep = ldp->ld_entry) == NULL) 361 continue; 362 363 if ((t = lep->le_thread) != NULL) { 364 clwp = forklwp(ttolwp(t), cp, t->t_tid); 365 if (clwp == NULL) 366 goto forklwperr; 367 ct = lwptot(clwp); 368 /* 369 * Inherit lwp_wait()able and daemon flags. 370 */ 371 ct->t_proc_flag |= 372 (t->t_proc_flag & (TP_TWAIT|TP_DAEMON)); 373 /* 374 * Keep track of the clone of curthread to 375 * post return values through lwp_setrval(). 376 * Mark other threads for special treatment 377 * by lwp_rtt() / post_syscall(). 378 */ 379 if (t == curthread) 380 clone = clwp; 381 else 382 ct->t_flag |= T_FORKALL; 383 } else { 384 /* 385 * Replicate zombie lwps in the child. 386 */ 387 clep = kmem_zalloc(sizeof (*clep), KM_SLEEP); 388 clep->le_lwpid = lep->le_lwpid; 389 clep->le_start = lep->le_start; 390 lwp_hash_in(cp, clep); 391 } 392 } 393 } 394 395 /* 396 * Put new process in the parent's process contract, or put it 397 * in a new one if there is an active process template. Send a 398 * fork event (if requested) to whatever contract the child is 399 * a member of. Fails if the parent has been SIGKILLed. 400 */ 401 if (contract_process_fork(NULL, cp, p, B_TRUE) == NULL) 402 goto forklwperr; 403 404 /* 405 * No fork failures occur beyond this point. 406 */ 407 408 cp->p_lwpid = p->p_lwpid; 409 if (!isfork1) { 410 cp->p_lwpdaemon = p->p_lwpdaemon; 411 cp->p_zombcnt = p->p_zombcnt; 412 /* 413 * If the parent's lwp ids have wrapped around, so have the 414 * child's. 415 */ 416 cp->p_flag |= p->p_flag & SLWPWRAP; 417 } 418 419 mutex_enter(&p->p_lock); 420 corectl_path_hold(cp->p_corefile = p->p_corefile); 421 corectl_content_hold(cp->p_content = p->p_content); 422 mutex_exit(&p->p_lock); 423 424 /* 425 * Duplicate process context ops, if any. 426 */ 427 if (p->p_pctx) 428 forkpctx(p, cp); 429 430 #ifdef __sparc 431 utrap_dup(p, cp); 432 #endif 433 /* 434 * If the child process has been marked to stop on exit 435 * from this fork, arrange for all other lwps to stop in 436 * sympathy with the active lwp. 437 */ 438 if (PTOU(cp)->u_systrap && 439 prismember(&PTOU(cp)->u_exitmask, curthread->t_sysnum)) { 440 mutex_enter(&cp->p_lock); 441 t = cp->p_tlist; 442 do { 443 t->t_proc_flag |= TP_PRSTOP; 444 aston(t); /* so TP_PRSTOP will be seen */ 445 } while ((t = t->t_forw) != cp->p_tlist); 446 mutex_exit(&cp->p_lock); 447 } 448 /* 449 * If the parent process has been marked to stop on exit 450 * from this fork, and its asynchronous-stop flag has not 451 * been set, arrange for all other lwps to stop before 452 * they return back to user level. 453 */ 454 if (!(p->p_proc_flag & P_PR_ASYNC) && PTOU(p)->u_systrap && 455 prismember(&PTOU(p)->u_exitmask, curthread->t_sysnum)) { 456 mutex_enter(&p->p_lock); 457 t = p->p_tlist; 458 do { 459 t->t_proc_flag |= TP_PRSTOP; 460 aston(t); /* so TP_PRSTOP will be seen */ 461 } while ((t = t->t_forw) != p->p_tlist); 462 mutex_exit(&p->p_lock); 463 } 464 465 if (PROC_IS_BRANDED(p)) 466 BROP(p)->b_lwp_setrval(clone, p->p_pid, 1); 467 else 468 lwp_setrval(clone, p->p_pid, 1); 469 470 /* set return values for parent */ 471 r.r_val1 = (int)cp->p_pid; 472 r.r_val2 = 0; 473 474 /* 475 * pool_barrier_exit() can now be called because the child process has: 476 * - all identifying features cloned or set (p_pid, p_task, p_pool) 477 * - all resource sets associated (p_tlist->*->t_cpupart, p_as->a_mset) 478 * - any other fields set which are used in resource set binding. 479 */ 480 mutex_enter(&p->p_lock); 481 pool_barrier_exit(); 482 mutex_exit(&p->p_lock); 483 484 mutex_enter(&pidlock); 485 mutex_enter(&cp->p_lock); 486 487 /* 488 * Now that there are lwps and threads attached, add the new 489 * process to the process group. 490 */ 491 pgjoin(cp, p->p_pgidp); 492 cp->p_stat = SRUN; 493 /* 494 * We are now done with all the lwps in the child process. 495 */ 496 t = cp->p_tlist; 497 do { 498 /* 499 * Set the lwp_suspend()ed lwps running. 500 * They will suspend properly at syscall exit. 501 */ 502 if (t->t_proc_flag & TP_HOLDLWP) 503 lwp_create_done(t); 504 else { 505 /* set TS_CREATE to allow continuelwps() to work */ 506 thread_lock(t); 507 ASSERT(t->t_state == TS_STOPPED && 508 !(t->t_schedflag & (TS_CREATE|TS_CSTART))); 509 t->t_schedflag |= TS_CREATE; 510 thread_unlock(t); 511 } 512 } while ((t = t->t_forw) != cp->p_tlist); 513 mutex_exit(&cp->p_lock); 514 515 if (isvfork) { 516 CPU_STATS_ADDQ(CPU, sys, sysvfork, 1); 517 mutex_enter(&p->p_lock); 518 p->p_flag |= SVFWAIT; 519 DTRACE_PROC1(create, proc_t *, cp); 520 cv_broadcast(&pr_pid_cv[p->p_slot]); /* inform /proc */ 521 mutex_exit(&p->p_lock); 522 /* 523 * Grab child's p_lock before dropping pidlock to ensure 524 * the process will not disappear before we set it running. 525 */ 526 mutex_enter(&cp->p_lock); 527 mutex_exit(&pidlock); 528 sigdefault(cp); 529 continuelwps(cp); 530 mutex_exit(&cp->p_lock); 531 } else { 532 CPU_STATS_ADDQ(CPU, sys, sysfork, 1); 533 DTRACE_PROC1(create, proc_t *, cp); 534 /* 535 * It is CL_FORKRET's job to drop pidlock. 536 * If we do it here, the process could be set running 537 * and disappear before CL_FORKRET() is called. 538 */ 539 CL_FORKRET(curthread, cp->p_tlist); 540 ASSERT(MUTEX_NOT_HELD(&pidlock)); 541 } 542 543 return (r.r_vals); 544 545 forklwperr: 546 if (isvfork) { 547 if (avl_numnodes(&p->p_wpage) != 0) { 548 /* restore watchpoints to parent */ 549 as = p->p_as; 550 AS_LOCK_ENTER(as, &as->a_lock, 551 RW_WRITER); 552 as->a_wpage = p->p_wpage; 553 avl_create(&p->p_wpage, wp_compare, 554 sizeof (struct watched_page), 555 offsetof(struct watched_page, wp_link)); 556 as_setwatch(as); 557 AS_LOCK_EXIT(as, &as->a_lock); 558 } 559 } else { 560 if (cp->p_segacct) 561 shmexit(cp); 562 as = cp->p_as; 563 cp->p_as = &kas; 564 as_free(as); 565 } 566 567 if (cp->p_lwpdir) { 568 for (i = 0, ldp = cp->p_lwpdir; i < cp->p_lwpdir_sz; i++, ldp++) 569 if ((lep = ldp->ld_entry) != NULL) 570 kmem_free(lep, sizeof (*lep)); 571 kmem_free(cp->p_lwpdir, 572 cp->p_lwpdir_sz * sizeof (*cp->p_lwpdir)); 573 } 574 cp->p_lwpdir = NULL; 575 cp->p_lwpfree = NULL; 576 cp->p_lwpdir_sz = 0; 577 578 if (cp->p_tidhash) 579 kmem_free(cp->p_tidhash, 580 cp->p_tidhash_sz * sizeof (*cp->p_tidhash)); 581 cp->p_tidhash = NULL; 582 cp->p_tidhash_sz = 0; 583 584 forklwp_fail(cp); 585 fork_fail(cp); 586 rctl_set_free(cp->p_rctls); 587 mutex_enter(&pidlock); 588 589 /* 590 * Detach failed child from task. 591 */ 592 mutex_enter(&cp->p_lock); 593 tk = cp->p_task; 594 task_detach(cp); 595 ASSERT(cp->p_pool->pool_ref > 0); 596 atomic_add_32(&cp->p_pool->pool_ref, -1); 597 mutex_exit(&cp->p_lock); 598 599 orphpp = &p->p_orphan; 600 while (*orphpp != cp) 601 orphpp = &(*orphpp)->p_nextorph; 602 *orphpp = cp->p_nextorph; 603 if (p->p_child == cp) 604 p->p_child = cp->p_sibling; 605 if (cp->p_sibling) 606 cp->p_sibling->p_psibling = cp->p_psibling; 607 if (cp->p_psibling) 608 cp->p_psibling->p_sibling = cp->p_sibling; 609 pid_exit(cp); 610 mutex_exit(&pidlock); 611 612 task_rele(tk); 613 614 mutex_enter(&p->p_lock); 615 pool_barrier_exit(); 616 continuelwps(p); 617 mutex_exit(&p->p_lock); 618 error = EAGAIN; 619 forkerr: 620 return ((int64_t)set_errno(error)); 621 } 622 623 /* 624 * Free allocated resources from getproc() if a fork failed. 625 */ 626 static void 627 fork_fail(proc_t *cp) 628 { 629 uf_info_t *fip = P_FINFO(cp); 630 631 fcnt_add(fip, -1); 632 sigdelq(cp, NULL, 0); 633 634 mutex_enter(&pidlock); 635 upcount_dec(crgetruid(cp->p_cred), crgetzoneid(cp->p_cred)); 636 mutex_exit(&pidlock); 637 638 /* 639 * single threaded, so no locking needed here 640 */ 641 crfree(cp->p_cred); 642 643 kmem_free(fip->fi_list, fip->fi_nfiles * sizeof (uf_entry_t)); 644 645 VN_RELE(u.u_cdir); 646 if (u.u_rdir) 647 VN_RELE(u.u_rdir); 648 if (cp->p_exec) 649 VN_RELE(cp->p_exec); 650 if (cp->p_execdir) 651 VN_RELE(cp->p_execdir); 652 if (u.u_cwd) 653 refstr_rele(u.u_cwd); 654 } 655 656 /* 657 * Clean up the lwps already created for this child process. 658 * The fork failed while duplicating all the lwps of the parent 659 * and those lwps already created must be freed. 660 * This process is invisible to the rest of the system, 661 * so we don't need to hold p->p_lock to protect the list. 662 */ 663 static void 664 forklwp_fail(proc_t *p) 665 { 666 kthread_t *t; 667 task_t *tk; 668 669 while ((t = p->p_tlist) != NULL) { 670 /* 671 * First remove the lwp from the process's p_tlist. 672 */ 673 if (t != t->t_forw) 674 p->p_tlist = t->t_forw; 675 else 676 p->p_tlist = NULL; 677 p->p_lwpcnt--; 678 t->t_forw->t_back = t->t_back; 679 t->t_back->t_forw = t->t_forw; 680 681 tk = p->p_task; 682 mutex_enter(&p->p_zone->zone_nlwps_lock); 683 tk->tk_nlwps--; 684 tk->tk_proj->kpj_nlwps--; 685 p->p_zone->zone_nlwps--; 686 mutex_exit(&p->p_zone->zone_nlwps_lock); 687 688 ASSERT(t->t_schedctl == NULL); 689 690 if (t->t_door != NULL) { 691 kmem_free(t->t_door, sizeof (door_data_t)); 692 t->t_door = NULL; 693 } 694 lwp_ctmpl_clear(ttolwp(t)); 695 696 /* 697 * Remove the thread from the all threads list. 698 * We need to hold pidlock for this. 699 */ 700 mutex_enter(&pidlock); 701 t->t_next->t_prev = t->t_prev; 702 t->t_prev->t_next = t->t_next; 703 CL_EXIT(t); /* tell the scheduler that we're exiting */ 704 cv_broadcast(&t->t_joincv); /* tell anyone in thread_join */ 705 mutex_exit(&pidlock); 706 707 /* 708 * Let the lgroup load averages know that this thread isn't 709 * going to show up (i.e. un-do what was done on behalf of 710 * this thread by the earlier lgrp_move_thread()). 711 */ 712 kpreempt_disable(); 713 lgrp_move_thread(t, NULL, 1); 714 kpreempt_enable(); 715 716 /* 717 * The thread was created TS_STOPPED. 718 * We change it to TS_FREE to avoid an 719 * ASSERT() panic in thread_free(). 720 */ 721 t->t_state = TS_FREE; 722 thread_rele(t); 723 thread_free(t); 724 } 725 } 726 727 extern struct as kas; 728 729 /* 730 * fork a kernel process. 731 */ 732 int 733 newproc(void (*pc)(), caddr_t arg, id_t cid, int pri, struct contract **ct) 734 { 735 proc_t *p; 736 struct user *up; 737 klwp_t *lwp; 738 cont_process_t *ctp = NULL; 739 rctl_entity_p_t e; 740 741 ASSERT(!(cid == syscid && ct != NULL)); 742 if (cid == syscid) { 743 rctl_alloc_gp_t *init_gp; 744 rctl_set_t *init_set; 745 746 if (getproc(&p, 1) < 0) 747 return (EAGAIN); 748 749 p->p_flag |= SNOWAIT; 750 p->p_exec = NULL; 751 p->p_execdir = NULL; 752 753 init_set = rctl_set_create(); 754 init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS); 755 756 /* 757 * kernel processes do not inherit /proc tracing flags. 758 */ 759 sigemptyset(&p->p_sigmask); 760 premptyset(&p->p_fltmask); 761 up = PTOU(p); 762 up->u_systrap = 0; 763 premptyset(&(up->u_entrymask)); 764 premptyset(&(up->u_exitmask)); 765 mutex_enter(&p->p_lock); 766 e.rcep_p.proc = p; 767 e.rcep_t = RCENTITY_PROCESS; 768 p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set, 769 init_gp); 770 mutex_exit(&p->p_lock); 771 772 rctl_prealloc_destroy(init_gp); 773 } else { 774 rctl_alloc_gp_t *init_gp, *default_gp; 775 rctl_set_t *init_set; 776 task_t *tk, *tk_old; 777 778 if (getproc(&p, 0) < 0) 779 return (EAGAIN); 780 /* 781 * init creates a new task, distinct from the task 782 * containing kernel "processes". 783 */ 784 tk = task_create(0, p->p_zone); 785 mutex_enter(&tk->tk_zone->zone_nlwps_lock); 786 tk->tk_proj->kpj_ntasks++; 787 mutex_exit(&tk->tk_zone->zone_nlwps_lock); 788 789 default_gp = rctl_rlimit_set_prealloc(RLIM_NLIMITS); 790 init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS); 791 init_set = rctl_set_create(); 792 793 mutex_enter(&pidlock); 794 mutex_enter(&p->p_lock); 795 tk_old = p->p_task; /* switch to new task */ 796 797 task_detach(p); 798 task_begin(tk, p); 799 mutex_exit(&pidlock); 800 801 e.rcep_p.proc = p; 802 e.rcep_t = RCENTITY_PROCESS; 803 p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set, 804 init_gp); 805 rctlproc_default_init(p, default_gp); 806 mutex_exit(&p->p_lock); 807 808 task_rele(tk_old); 809 rctl_prealloc_destroy(default_gp); 810 rctl_prealloc_destroy(init_gp); 811 } 812 813 p->p_as = &kas; 814 815 if ((lwp = lwp_create(pc, arg, 0, p, TS_STOPPED, pri, 816 &curthread->t_hold, cid, 1)) == NULL) { 817 task_t *tk; 818 fork_fail(p); 819 mutex_enter(&pidlock); 820 mutex_enter(&p->p_lock); 821 tk = p->p_task; 822 task_detach(p); 823 ASSERT(p->p_pool->pool_ref > 0); 824 atomic_add_32(&p->p_pool->pool_ref, -1); 825 mutex_exit(&p->p_lock); 826 pid_exit(p); 827 mutex_exit(&pidlock); 828 task_rele(tk); 829 830 return (EAGAIN); 831 } 832 833 if (cid != syscid) { 834 ctp = contract_process_fork(sys_process_tmpl, p, curproc, 835 B_FALSE); 836 ASSERT(ctp != NULL); 837 if (ct != NULL) 838 *ct = &ctp->conp_contract; 839 } 840 841 p->p_lwpid = 1; 842 mutex_enter(&pidlock); 843 pgjoin(p, curproc->p_pgidp); 844 p->p_stat = SRUN; 845 mutex_enter(&p->p_lock); 846 lwptot(lwp)->t_proc_flag &= ~TP_HOLDLWP; 847 lwp_create_done(lwptot(lwp)); 848 mutex_exit(&p->p_lock); 849 mutex_exit(&pidlock); 850 return (0); 851 } 852 853 /* 854 * create a child proc struct. 855 */ 856 static int 857 getproc(proc_t **cpp, int kernel) 858 { 859 proc_t *pp, *cp; 860 pid_t newpid; 861 struct user *uarea; 862 extern uint_t nproc; 863 struct cred *cr; 864 uid_t ruid; 865 zoneid_t zoneid; 866 867 if (!page_mem_avail(tune.t_minarmem)) 868 return (-1); 869 if (zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN) 870 return (-1); /* no point in starting new processes */ 871 872 pp = curproc; 873 cp = kmem_cache_alloc(process_cache, KM_SLEEP); 874 bzero(cp, sizeof (proc_t)); 875 876 /* 877 * Make proc entry for child process 878 */ 879 mutex_init(&cp->p_splock, NULL, MUTEX_DEFAULT, NULL); 880 mutex_init(&cp->p_crlock, NULL, MUTEX_DEFAULT, NULL); 881 mutex_init(&cp->p_pflock, NULL, MUTEX_DEFAULT, NULL); 882 #if defined(__x86) 883 mutex_init(&cp->p_ldtlock, NULL, MUTEX_DEFAULT, NULL); 884 #endif 885 mutex_init(&cp->p_maplock, NULL, MUTEX_DEFAULT, NULL); 886 cp->p_stat = SIDL; 887 cp->p_mstart = gethrtime(); 888 889 if ((newpid = pid_allocate(cp, PID_ALLOC_PROC)) == -1) { 890 if (nproc == v.v_proc) { 891 CPU_STATS_ADDQ(CPU, sys, procovf, 1); 892 cmn_err(CE_WARN, "out of processes"); 893 } 894 goto bad; 895 } 896 897 /* 898 * If not privileged make sure that this user hasn't exceeded 899 * v.v_maxup processes, and that users collectively haven't 900 * exceeded v.v_maxupttl processes. 901 */ 902 mutex_enter(&pidlock); 903 ASSERT(nproc < v.v_proc); /* otherwise how'd we get our pid? */ 904 cr = CRED(); 905 ruid = crgetruid(cr); 906 zoneid = crgetzoneid(cr); 907 if (nproc >= v.v_maxup && /* short-circuit; usually false */ 908 (nproc >= v.v_maxupttl || 909 upcount_get(ruid, zoneid) >= v.v_maxup) && 910 secpolicy_newproc(cr) != 0) { 911 mutex_exit(&pidlock); 912 zcmn_err(zoneid, CE_NOTE, 913 "out of per-user processes for uid %d", ruid); 914 goto bad; 915 } 916 917 /* 918 * Everything is cool, put the new proc on the active process list. 919 * It is already on the pid list and in /proc. 920 * Increment the per uid process count (upcount). 921 */ 922 nproc++; 923 upcount_inc(ruid, zoneid); 924 925 cp->p_next = practive; 926 practive->p_prev = cp; 927 practive = cp; 928 929 cp->p_ignore = pp->p_ignore; 930 cp->p_siginfo = pp->p_siginfo; 931 cp->p_flag = pp->p_flag & (SJCTL|SNOWAIT|SNOCD); 932 cp->p_sessp = pp->p_sessp; 933 sess_hold(pp); 934 cp->p_exec = pp->p_exec; 935 cp->p_execdir = pp->p_execdir; 936 cp->p_zone = pp->p_zone; 937 cp->p_brand = pp->p_brand; 938 if (PROC_IS_BRANDED(pp)) 939 BROP(pp)->b_copy_procdata(cp, pp); 940 941 cp->p_bssbase = pp->p_bssbase; 942 cp->p_brkbase = pp->p_brkbase; 943 cp->p_brksize = pp->p_brksize; 944 cp->p_brkpageszc = pp->p_brkpageszc; 945 cp->p_stksize = pp->p_stksize; 946 cp->p_stkpageszc = pp->p_stkpageszc; 947 cp->p_stkprot = pp->p_stkprot; 948 cp->p_datprot = pp->p_datprot; 949 cp->p_usrstack = pp->p_usrstack; 950 cp->p_model = pp->p_model; 951 cp->p_ppid = pp->p_pid; 952 cp->p_ancpid = pp->p_pid; 953 cp->p_portcnt = pp->p_portcnt; 954 955 /* 956 * Initialize watchpoint structures 957 */ 958 avl_create(&cp->p_warea, wa_compare, sizeof (struct watched_area), 959 offsetof(struct watched_area, wa_link)); 960 961 /* 962 * Initialize immediate resource control values. 963 */ 964 cp->p_stk_ctl = pp->p_stk_ctl; 965 cp->p_fsz_ctl = pp->p_fsz_ctl; 966 cp->p_vmem_ctl = pp->p_vmem_ctl; 967 cp->p_fno_ctl = pp->p_fno_ctl; 968 969 /* 970 * Link up to parent-child-sibling chain. No need to lock 971 * in general since only a call to freeproc() (done by the 972 * same parent as newproc()) diddles with the child chain. 973 */ 974 cp->p_sibling = pp->p_child; 975 if (pp->p_child) 976 pp->p_child->p_psibling = cp; 977 978 cp->p_parent = pp; 979 pp->p_child = cp; 980 981 cp->p_child_ns = NULL; 982 cp->p_sibling_ns = NULL; 983 984 cp->p_nextorph = pp->p_orphan; 985 cp->p_nextofkin = pp; 986 pp->p_orphan = cp; 987 988 /* 989 * Inherit profiling state; do not inherit REALPROF profiling state. 990 */ 991 cp->p_prof = pp->p_prof; 992 cp->p_rprof_cyclic = CYCLIC_NONE; 993 994 /* 995 * Inherit pool pointer from the parent. Kernel processes are 996 * always bound to the default pool. 997 */ 998 mutex_enter(&pp->p_lock); 999 if (kernel) { 1000 cp->p_pool = pool_default; 1001 cp->p_flag |= SSYS; 1002 } else { 1003 cp->p_pool = pp->p_pool; 1004 } 1005 atomic_add_32(&cp->p_pool->pool_ref, 1); 1006 mutex_exit(&pp->p_lock); 1007 1008 /* 1009 * Add the child process to the current task. Kernel processes 1010 * are always attached to task0. 1011 */ 1012 mutex_enter(&cp->p_lock); 1013 if (kernel) 1014 task_attach(task0p, cp); 1015 else 1016 task_attach(pp->p_task, cp); 1017 mutex_exit(&cp->p_lock); 1018 mutex_exit(&pidlock); 1019 1020 avl_create(&cp->p_ct_held, contract_compar, sizeof (contract_t), 1021 offsetof(contract_t, ct_ctlist)); 1022 1023 /* 1024 * Duplicate any audit information kept in the process table 1025 */ 1026 #ifdef C2_AUDIT 1027 if (audit_active) /* copy audit data to cp */ 1028 audit_newproc(cp); 1029 #endif 1030 1031 crhold(cp->p_cred = cr); 1032 1033 /* 1034 * Bump up the counts on the file structures pointed at by the 1035 * parent's file table since the child will point at them too. 1036 */ 1037 fcnt_add(P_FINFO(pp), 1); 1038 1039 VN_HOLD(u.u_cdir); 1040 if (u.u_rdir) 1041 VN_HOLD(u.u_rdir); 1042 if (u.u_cwd) 1043 refstr_hold(u.u_cwd); 1044 1045 /* 1046 * copy the parent's uarea. 1047 */ 1048 uarea = PTOU(cp); 1049 bcopy(PTOU(pp), uarea, sizeof (user_t)); 1050 flist_fork(P_FINFO(pp), P_FINFO(cp)); 1051 1052 gethrestime(&uarea->u_start); 1053 uarea->u_ticks = lbolt; 1054 uarea->u_mem = rm_asrss(pp->p_as); 1055 uarea->u_acflag = AFORK; 1056 1057 /* 1058 * If inherit-on-fork, copy /proc tracing flags to child. 1059 */ 1060 if ((pp->p_proc_flag & P_PR_FORK) != 0) { 1061 cp->p_proc_flag |= pp->p_proc_flag & (P_PR_TRACE|P_PR_FORK); 1062 cp->p_sigmask = pp->p_sigmask; 1063 cp->p_fltmask = pp->p_fltmask; 1064 } else { 1065 sigemptyset(&cp->p_sigmask); 1066 premptyset(&cp->p_fltmask); 1067 uarea->u_systrap = 0; 1068 premptyset(&uarea->u_entrymask); 1069 premptyset(&uarea->u_exitmask); 1070 } 1071 /* 1072 * If microstate accounting is being inherited, mark child 1073 */ 1074 if ((pp->p_flag & SMSFORK) != 0) 1075 cp->p_flag |= pp->p_flag & (SMSFORK|SMSACCT); 1076 1077 /* 1078 * Inherit fixalignment flag from the parent 1079 */ 1080 cp->p_fixalignment = pp->p_fixalignment; 1081 1082 if (cp->p_exec) 1083 VN_HOLD(cp->p_exec); 1084 if (cp->p_execdir) 1085 VN_HOLD(cp->p_execdir); 1086 *cpp = cp; 1087 return (0); 1088 1089 bad: 1090 ASSERT(MUTEX_NOT_HELD(&pidlock)); 1091 1092 mutex_destroy(&cp->p_crlock); 1093 mutex_destroy(&cp->p_pflock); 1094 #if defined(__x86) 1095 mutex_destroy(&cp->p_ldtlock); 1096 #endif 1097 if (newpid != -1) { 1098 proc_entry_free(cp->p_pidp); 1099 (void) pid_rele(cp->p_pidp); 1100 } 1101 kmem_cache_free(process_cache, cp); 1102 1103 /* 1104 * We most likely got into this situation because some process is 1105 * forking out of control. As punishment, put it to sleep for a 1106 * bit so it can't eat the machine alive. Sleep interval is chosen 1107 * to allow no more than one fork failure per cpu per clock tick 1108 * on average (yes, I just made this up). This has two desirable 1109 * properties: (1) it sets a constant limit on the fork failure 1110 * rate, and (2) the busier the system is, the harsher the penalty 1111 * for abusing it becomes. 1112 */ 1113 INCR_COUNT(&fork_fail_pending, &pidlock); 1114 delay(fork_fail_pending / ncpus + 1); 1115 DECR_COUNT(&fork_fail_pending, &pidlock); 1116 1117 return (-1); /* out of memory or proc slots */ 1118 } 1119 1120 /* 1121 * Release virtual memory. 1122 * In the case of vfork(), the child was given exclusive access to its 1123 * parent's address space. The parent is waiting in vfwait() for the 1124 * child to release its exclusive claim via relvm(). 1125 */ 1126 void 1127 relvm() 1128 { 1129 proc_t *p = curproc; 1130 1131 ASSERT((unsigned)p->p_lwpcnt <= 1); 1132 1133 prrelvm(); /* inform /proc */ 1134 1135 if (p->p_flag & SVFORK) { 1136 proc_t *pp = p->p_parent; 1137 /* 1138 * The child process is either exec'ing or exit'ing. 1139 * The child is now separated from the parent's address 1140 * space. The parent process is made dispatchable. 1141 * 1142 * This is a delicate locking maneuver, involving 1143 * both the parent's p_lock and the child's p_lock. 1144 * As soon as the SVFORK flag is turned off, the 1145 * parent is free to run, but it must not run until 1146 * we wake it up using its p_cv because it might 1147 * exit and we would be referencing invalid memory. 1148 * Therefore, we hold the parent with its p_lock 1149 * while protecting our p_flags with our own p_lock. 1150 */ 1151 try_again: 1152 mutex_enter(&p->p_lock); /* grab child's lock first */ 1153 prbarrier(p); /* make sure /proc is blocked out */ 1154 mutex_enter(&pp->p_lock); 1155 1156 /* 1157 * Check if parent is locked by /proc. 1158 */ 1159 if (pp->p_proc_flag & P_PR_LOCK) { 1160 /* 1161 * Delay until /proc is done with the parent. 1162 * We must drop our (the child's) p->p_lock, wait 1163 * via prbarrier() on the parent, then start over. 1164 */ 1165 mutex_exit(&p->p_lock); 1166 prbarrier(pp); 1167 mutex_exit(&pp->p_lock); 1168 goto try_again; 1169 } 1170 p->p_flag &= ~SVFORK; 1171 kpreempt_disable(); 1172 p->p_as = &kas; 1173 1174 /* 1175 * notify hat of change in thread's address space 1176 */ 1177 hat_thread_exit(curthread); 1178 kpreempt_enable(); 1179 1180 /* 1181 * child sizes are copied back to parent because 1182 * child may have grown. 1183 */ 1184 pp->p_brkbase = p->p_brkbase; 1185 pp->p_brksize = p->p_brksize; 1186 pp->p_stksize = p->p_stksize; 1187 /* 1188 * The parent is no longer waiting for the vfork()d child. 1189 * Restore the parent's watched pages, if any. This is 1190 * safe because we know the parent is not locked by /proc 1191 */ 1192 pp->p_flag &= ~SVFWAIT; 1193 if (avl_numnodes(&pp->p_wpage) != 0) { 1194 pp->p_as->a_wpage = pp->p_wpage; 1195 avl_create(&pp->p_wpage, wp_compare, 1196 sizeof (struct watched_page), 1197 offsetof(struct watched_page, wp_link)); 1198 } 1199 cv_signal(&pp->p_cv); 1200 mutex_exit(&pp->p_lock); 1201 mutex_exit(&p->p_lock); 1202 } else { 1203 if (p->p_as != &kas) { 1204 struct as *as; 1205 1206 if (p->p_segacct) 1207 shmexit(p); 1208 1209 /* 1210 * We grab p_lock for the benefit of /proc 1211 */ 1212 kpreempt_disable(); 1213 mutex_enter(&p->p_lock); 1214 prbarrier(p); /* make sure /proc is blocked out */ 1215 as = p->p_as; 1216 p->p_as = &kas; 1217 mutex_exit(&p->p_lock); 1218 1219 /* 1220 * notify hat of change in thread's address space 1221 */ 1222 hat_thread_exit(curthread); 1223 kpreempt_enable(); 1224 1225 as_free(as); 1226 } 1227 } 1228 } 1229 1230 /* 1231 * Wait for child to exec or exit. 1232 * Called by parent of vfork'ed process. 1233 * See important comments in relvm(), above. 1234 */ 1235 void 1236 vfwait(pid_t pid) 1237 { 1238 int signalled = 0; 1239 proc_t *pp = ttoproc(curthread); 1240 proc_t *cp; 1241 1242 /* 1243 * Wait for child to exec or exit. 1244 */ 1245 for (;;) { 1246 mutex_enter(&pidlock); 1247 cp = prfind(pid); 1248 if (cp == NULL || cp->p_parent != pp) { 1249 /* 1250 * Child has exit()ed. 1251 */ 1252 mutex_exit(&pidlock); 1253 break; 1254 } 1255 /* 1256 * Grab the child's p_lock before releasing pidlock. 1257 * Otherwise, the child could exit and we would be 1258 * referencing invalid memory. 1259 */ 1260 mutex_enter(&cp->p_lock); 1261 mutex_exit(&pidlock); 1262 if (!(cp->p_flag & SVFORK)) { 1263 /* 1264 * Child has exec()ed or is exit()ing. 1265 */ 1266 mutex_exit(&cp->p_lock); 1267 break; 1268 } 1269 mutex_enter(&pp->p_lock); 1270 mutex_exit(&cp->p_lock); 1271 /* 1272 * We might be waked up spuriously from the cv_wait(). 1273 * We have to do the whole operation over again to be 1274 * sure the child's SVFORK flag really is turned off. 1275 * We cannot make reference to the child because it can 1276 * exit before we return and we would be referencing 1277 * invalid memory. 1278 * 1279 * Because this is potentially a very long-term wait, 1280 * we call cv_wait_sig() (for its jobcontrol and /proc 1281 * side-effects) unless there is a current signal, in 1282 * which case we use cv_wait() because we cannot return 1283 * from this function until the child has released the 1284 * address space. Calling cv_wait_sig() with a current 1285 * signal would lead to an indefinite loop here because 1286 * cv_wait_sig() returns immediately in this case. 1287 */ 1288 if (signalled) 1289 cv_wait(&pp->p_cv, &pp->p_lock); 1290 else 1291 signalled = !cv_wait_sig(&pp->p_cv, &pp->p_lock); 1292 mutex_exit(&pp->p_lock); 1293 } 1294 1295 /* restore watchpoints to parent */ 1296 if (pr_watch_active(pp)) { 1297 struct as *as = pp->p_as; 1298 AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER); 1299 as_setwatch(as); 1300 AS_LOCK_EXIT(as, &as->a_lock); 1301 } 1302 1303 mutex_enter(&pp->p_lock); 1304 prbarrier(pp); /* barrier against /proc locking */ 1305 continuelwps(pp); 1306 mutex_exit(&pp->p_lock); 1307 } 1308