1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/param.h> 28 #include <sys/types.h> 29 #include <sys/sysmacros.h> 30 #include <sys/systm.h> 31 #include <sys/thread.h> 32 #include <sys/proc.h> 33 #include <sys/task.h> 34 #include <sys/project.h> 35 #include <sys/signal.h> 36 #include <sys/errno.h> 37 #include <sys/vmparam.h> 38 #include <sys/stack.h> 39 #include <sys/procfs.h> 40 #include <sys/prsystm.h> 41 #include <sys/cpuvar.h> 42 #include <sys/kmem.h> 43 #include <sys/vtrace.h> 44 #include <sys/door.h> 45 #include <vm/seg_kp.h> 46 #include <sys/debug.h> 47 #include <sys/tnf.h> 48 #include <sys/schedctl.h> 49 #include <sys/poll.h> 50 #include <sys/copyops.h> 51 #include <sys/lwp_upimutex_impl.h> 52 #include <sys/cpupart.h> 53 #include <sys/lgrp.h> 54 #include <sys/rctl.h> 55 #include <sys/contract_impl.h> 56 #include <sys/cpc_impl.h> 57 #include <sys/sdt.h> 58 #include <sys/cmn_err.h> 59 #include <sys/brand.h> 60 #include <sys/cyclic.h> 61 62 /* hash function for the lwpid hash table, p->p_tidhash[] */ 63 #define TIDHASH(tid, hash_sz) ((tid) & ((hash_sz) - 1)) 64 65 void *segkp_lwp; /* cookie for pool of segkp resources */ 66 extern void reapq_move_lq_to_tq(kthread_t *); 67 extern void freectx_ctx(struct ctxop *); 68 69 /* 70 * Create a thread that appears to be stopped at sys_rtt. 71 */ 72 klwp_t * 73 lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p, 74 int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid) 75 { 76 klwp_t *lwp = NULL; 77 kthread_t *t; 78 kthread_t *tx; 79 cpupart_t *oldpart = NULL; 80 size_t stksize; 81 caddr_t lwpdata = NULL; 82 processorid_t binding; 83 int err = 0; 84 kproject_t *oldkpj, *newkpj; 85 void *bufp = NULL; 86 klwp_t *curlwp = ttolwp(curthread); 87 lwpent_t *lep; 88 lwpdir_t *old_dir = NULL; 89 uint_t old_dirsz = 0; 90 tidhash_t *old_hash = NULL; 91 uint_t old_hashsz = 0; 92 ret_tidhash_t *ret_tidhash = NULL; 93 int i; 94 int rctlfail = 0; 95 boolean_t branded = 0; 96 struct ctxop *ctx = NULL; 97 98 mutex_enter(&p->p_lock); 99 mutex_enter(&p->p_zone->zone_nlwps_lock); 100 /* 101 * don't enforce rctl limits on system processes 102 */ 103 if (cid != syscid) { 104 if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl) 105 if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p, 106 1, 0) & RCT_DENY) 107 rctlfail = 1; 108 if (p->p_task->tk_proj->kpj_nlwps >= 109 p->p_task->tk_proj->kpj_nlwps_ctl) 110 if (rctl_test(rc_project_nlwps, 111 p->p_task->tk_proj->kpj_rctls, p, 1, 0) 112 & RCT_DENY) 113 rctlfail = 1; 114 if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl) 115 if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p, 116 1, 0) & RCT_DENY) 117 rctlfail = 1; 118 } 119 if (rctlfail) { 120 mutex_exit(&p->p_zone->zone_nlwps_lock); 121 mutex_exit(&p->p_lock); 122 return (NULL); 123 } 124 p->p_task->tk_nlwps++; 125 p->p_task->tk_proj->kpj_nlwps++; 126 p->p_zone->zone_nlwps++; 127 mutex_exit(&p->p_zone->zone_nlwps_lock); 128 mutex_exit(&p->p_lock); 129 130 if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0) 131 stksize = lwp_default_stksize; 132 133 /* 134 * Try to reclaim a <lwp,stack> from 'deathrow' 135 */ 136 if (stksize == lwp_default_stksize) { 137 if (lwp_reapcnt > 0) { 138 mutex_enter(&reaplock); 139 if ((t = lwp_deathrow) != NULL) { 140 ASSERT(t->t_swap); 141 lwp_deathrow = t->t_forw; 142 lwp_reapcnt--; 143 lwpdata = t->t_swap; 144 lwp = t->t_lwp; 145 ctx = t->t_ctx; 146 t->t_swap = NULL; 147 t->t_lwp = NULL; 148 t->t_ctx = NULL; 149 reapq_move_lq_to_tq(t); 150 } 151 mutex_exit(&reaplock); 152 if (lwp != NULL) { 153 lwp_stk_fini(lwp); 154 } 155 if (ctx != NULL) { 156 freectx_ctx(ctx); 157 } 158 } 159 if (lwpdata == NULL && 160 (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) { 161 mutex_enter(&p->p_lock); 162 mutex_enter(&p->p_zone->zone_nlwps_lock); 163 p->p_task->tk_nlwps--; 164 p->p_task->tk_proj->kpj_nlwps--; 165 p->p_zone->zone_nlwps--; 166 mutex_exit(&p->p_zone->zone_nlwps_lock); 167 mutex_exit(&p->p_lock); 168 return (NULL); 169 } 170 } else { 171 stksize = roundup(stksize, PAGESIZE); 172 if ((lwpdata = (caddr_t)segkp_get(segkp, stksize, 173 (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) { 174 mutex_enter(&p->p_lock); 175 mutex_enter(&p->p_zone->zone_nlwps_lock); 176 p->p_task->tk_nlwps--; 177 p->p_task->tk_proj->kpj_nlwps--; 178 p->p_zone->zone_nlwps--; 179 mutex_exit(&p->p_zone->zone_nlwps_lock); 180 mutex_exit(&p->p_lock); 181 return (NULL); 182 } 183 } 184 185 /* 186 * Create a thread, initializing the stack pointer 187 */ 188 t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri); 189 190 t->t_swap = lwpdata; /* Start of page-able data */ 191 if (lwp == NULL) 192 lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP); 193 bzero(lwp, sizeof (*lwp)); 194 t->t_lwp = lwp; 195 196 t->t_hold = *smask; 197 lwp->lwp_thread = t; 198 lwp->lwp_procp = p; 199 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE; 200 if (curlwp != NULL && curlwp->lwp_childstksz != 0) 201 lwp->lwp_childstksz = curlwp->lwp_childstksz; 202 203 t->t_stk = lwp_stk_init(lwp, t->t_stk); 204 thread_load(t, proc, arg, len); 205 206 /* 207 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect. 208 */ 209 if (p->p_rprof_cyclic != CYCLIC_NONE) 210 t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP); 211 212 if (cid != NOCLASS) 213 (void) CL_ALLOC(&bufp, cid, KM_SLEEP); 214 215 /* 216 * Allocate an lwp directory entry for the new lwp. 217 */ 218 lep = kmem_zalloc(sizeof (*lep), KM_SLEEP); 219 220 mutex_enter(&p->p_lock); 221 grow: 222 /* 223 * Grow the lwp (thread) directory and lwpid hash table if necessary. 224 * A note on the growth algorithm: 225 * The new lwp directory size is computed as: 226 * new = 2 * old + 2 227 * Starting with an initial size of 2 (see exec_common()), 228 * this yields numbers that are a power of two minus 2: 229 * 2, 6, 14, 30, 62, 126, 254, 510, 1022, ... 230 * The size of the lwpid hash table must be a power of two 231 * and must be commensurate in size with the lwp directory 232 * so that hash bucket chains remain short. Therefore, 233 * the lwpid hash table size is computed as: 234 * hashsz = (dirsz + 2) / 2 235 * which leads to these hash table sizes corresponding to 236 * the above directory sizes: 237 * 2, 4, 8, 16, 32, 64, 128, 256, 512, ... 238 * A note on growing the hash table: 239 * For performance reasons, code in lwp_unpark() does not 240 * acquire curproc->p_lock when searching the hash table. 241 * Rather, it calls lwp_hash_lookup_and_lock() which 242 * acquires only the individual hash bucket lock, taking 243 * care to deal with reallocation of the hash table 244 * during the time it takes to acquire the lock. 245 * 246 * This is sufficient to protect the integrity of the 247 * hash table, but it requires us to acquire all of the 248 * old hash bucket locks before growing the hash table 249 * and to release them afterwards. It also requires us 250 * not to free the old hash table because some thread 251 * in lwp_hash_lookup_and_lock() might still be trying 252 * to acquire the old bucket lock. 253 * 254 * So we adopt the tactic of keeping all of the retired 255 * hash tables on a linked list, so they can be safely 256 * freed when the process exits or execs. 257 * 258 * Because the hash table grows in powers of two, the 259 * total size of all of the hash tables will be slightly 260 * less than twice the size of the largest hash table. 261 */ 262 while (p->p_lwpfree == NULL) { 263 uint_t dirsz = p->p_lwpdir_sz; 264 lwpdir_t *new_dir; 265 uint_t new_dirsz; 266 lwpdir_t *ldp; 267 tidhash_t *new_hash; 268 uint_t new_hashsz; 269 270 mutex_exit(&p->p_lock); 271 272 /* 273 * Prepare to remember the old p_tidhash for later 274 * kmem_free()ing when the process exits or execs. 275 */ 276 if (ret_tidhash == NULL) 277 ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t), 278 KM_SLEEP); 279 if (old_dir != NULL) 280 kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 281 if (old_hash != NULL) 282 kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 283 284 new_dirsz = 2 * dirsz + 2; 285 new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP); 286 for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++) 287 ldp->ld_next = ldp + 1; 288 new_hashsz = (new_dirsz + 2) / 2; 289 new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t), 290 KM_SLEEP); 291 292 mutex_enter(&p->p_lock); 293 if (p == curproc) 294 prbarrier(p); 295 296 if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) { 297 /* 298 * Someone else beat us to it or some lwp exited. 299 * Set up to free our memory and take a lap. 300 */ 301 old_dir = new_dir; 302 old_dirsz = new_dirsz; 303 old_hash = new_hash; 304 old_hashsz = new_hashsz; 305 } else { 306 /* 307 * For the benefit of lwp_hash_lookup_and_lock(), 308 * called from lwp_unpark(), which searches the 309 * tid hash table without acquiring p->p_lock, 310 * we must acquire all of the tid hash table 311 * locks before replacing p->p_tidhash. 312 */ 313 old_hash = p->p_tidhash; 314 old_hashsz = p->p_tidhash_sz; 315 for (i = 0; i < old_hashsz; i++) { 316 mutex_enter(&old_hash[i].th_lock); 317 mutex_enter(&new_hash[i].th_lock); 318 } 319 320 /* 321 * We simply hash in all of the old directory entries. 322 * This works because the old directory has no empty 323 * slots and the new hash table starts out empty. 324 * This reproduces the original directory ordering 325 * (required for /proc directory semantics). 326 */ 327 old_dir = p->p_lwpdir; 328 old_dirsz = p->p_lwpdir_sz; 329 p->p_lwpdir = new_dir; 330 p->p_lwpfree = new_dir; 331 p->p_lwpdir_sz = new_dirsz; 332 for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++) 333 lwp_hash_in(p, ldp->ld_entry, 334 new_hash, new_hashsz, 0); 335 336 /* 337 * Remember the old hash table along with all 338 * of the previously-remembered hash tables. 339 * We will free them at process exit or exec. 340 */ 341 ret_tidhash->rth_tidhash = old_hash; 342 ret_tidhash->rth_tidhash_sz = old_hashsz; 343 ret_tidhash->rth_next = p->p_ret_tidhash; 344 p->p_ret_tidhash = ret_tidhash; 345 346 /* 347 * Now establish the new tid hash table. 348 * As soon as we assign p->p_tidhash, 349 * code in lwp_unpark() can start using it. 350 */ 351 membar_producer(); 352 p->p_tidhash = new_hash; 353 354 /* 355 * It is necessary that p_tidhash reach global 356 * visibility before p_tidhash_sz. Otherwise, 357 * code in lwp_hash_lookup_and_lock() could 358 * index into the old p_tidhash using the new 359 * p_tidhash_sz and thereby access invalid data. 360 */ 361 membar_producer(); 362 p->p_tidhash_sz = new_hashsz; 363 364 /* 365 * Release the locks; allow lwp_unpark() to carry on. 366 */ 367 for (i = 0; i < old_hashsz; i++) { 368 mutex_exit(&old_hash[i].th_lock); 369 mutex_exit(&new_hash[i].th_lock); 370 } 371 372 /* 373 * Avoid freeing these objects below. 374 */ 375 ret_tidhash = NULL; 376 old_hash = NULL; 377 old_hashsz = 0; 378 } 379 } 380 381 /* 382 * Block the process against /proc while we manipulate p->p_tlist, 383 * unless lwp_create() was called by /proc for the PCAGENT operation. 384 * We want to do this early enough so that we don't drop p->p_lock 385 * until the thread is put on the p->p_tlist. 386 */ 387 if (p == curproc) { 388 prbarrier(p); 389 /* 390 * If the current lwp has been requested to stop, do so now. 391 * Otherwise we have a race condition between /proc attempting 392 * to stop the process and this thread creating a new lwp 393 * that was not seen when the /proc PCSTOP request was issued. 394 * We rely on stop() to call prbarrier(p) before returning. 395 */ 396 while ((curthread->t_proc_flag & TP_PRSTOP) && 397 !ttolwp(curthread)->lwp_nostop) 398 stop(PR_REQUESTED, 0); 399 400 /* 401 * If process is exiting, there could be a race between 402 * the agent lwp creation and the new lwp currently being 403 * created. So to prevent this race lwp creation is failed 404 * if the process is exiting. 405 */ 406 if (p->p_flag & (SEXITLWPS|SKILLED)) { 407 err = 1; 408 goto error; 409 } 410 411 /* 412 * Since we might have dropped p->p_lock, the 413 * lwp directory free list might have changed. 414 */ 415 if (p->p_lwpfree == NULL) 416 goto grow; 417 } 418 419 kpreempt_disable(); /* can't grab cpu_lock here */ 420 421 /* 422 * Inherit processor and processor set bindings from curthread, 423 * unless we're creating a new kernel process, in which case 424 * clear all bindings. 425 */ 426 if (cid == syscid) { 427 t->t_bind_cpu = binding = PBIND_NONE; 428 t->t_cpupart = oldpart = &cp_default; 429 t->t_bind_pset = PS_NONE; 430 t->t_bindflag = (uchar_t)default_binding_mode; 431 } else { 432 binding = curthread->t_bind_cpu; 433 t->t_bind_cpu = binding; 434 oldpart = t->t_cpupart; 435 t->t_cpupart = curthread->t_cpupart; 436 t->t_bind_pset = curthread->t_bind_pset; 437 t->t_bindflag = curthread->t_bindflag | 438 (uchar_t)default_binding_mode; 439 } 440 441 /* 442 * thread_create() initializes this thread's home lgroup to the root. 443 * Choose a more suitable lgroup, since this thread is associated 444 * with an lwp. 445 */ 446 ASSERT(oldpart != NULL); 447 if (binding != PBIND_NONE && t->t_affinitycnt == 0) { 448 t->t_bound_cpu = cpu[binding]; 449 if (t->t_lpl != t->t_bound_cpu->cpu_lpl) 450 lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1); 451 } else { 452 lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1); 453 } 454 455 kpreempt_enable(); 456 457 /* 458 * make sure lpl points to our own partition 459 */ 460 ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads); 461 ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads + 462 t->t_cpupart->cp_nlgrploads); 463 464 /* 465 * If we're creating a new process, then inherit the project from our 466 * parent. If we're only creating an additional lwp then use the 467 * project pointer of the target process. 468 */ 469 if (p->p_task == NULL) 470 newkpj = ttoproj(curthread); 471 else 472 newkpj = p->p_task->tk_proj; 473 474 /* 475 * It is safe to point the thread to the new project without holding it 476 * since we're holding the target process' p_lock here and therefore 477 * we're guaranteed that it will not move to another project. 478 */ 479 oldkpj = ttoproj(t); 480 if (newkpj != oldkpj) { 481 t->t_proj = newkpj; 482 (void) project_hold(newkpj); 483 project_rele(oldkpj); 484 } 485 486 if (cid != NOCLASS) { 487 /* 488 * If the lwp is being created in the current process 489 * and matches the current thread's scheduling class, 490 * we should propagate the current thread's scheduling 491 * parameters by calling CL_FORK. Otherwise just use 492 * the defaults by calling CL_ENTERCLASS. 493 */ 494 if (p != curproc || curthread->t_cid != cid) { 495 err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp); 496 t->t_pri = pri; /* CL_ENTERCLASS may have changed it */ 497 /* 498 * We don't call schedctl_set_cidpri(t) here 499 * because the schedctl data is not yet set 500 * up for the newly-created lwp. 501 */ 502 } else { 503 t->t_clfuncs = &(sclass[cid].cl_funcs->thread); 504 err = CL_FORK(curthread, t, bufp); 505 t->t_cid = cid; 506 } 507 if (err) 508 goto error; 509 else 510 bufp = NULL; 511 } 512 513 /* 514 * If we were given an lwpid then use it, else allocate one. 515 */ 516 if (lwpid != 0) 517 t->t_tid = lwpid; 518 else { 519 /* 520 * lwp/thread id 0 is never valid; reserved for special checks. 521 * lwp/thread id 1 is reserved for the main thread. 522 * Start again at 2 when INT_MAX has been reached 523 * (id_t is a signed 32-bit integer). 524 */ 525 id_t prev_id = p->p_lwpid; /* last allocated tid */ 526 527 do { /* avoid lwpid duplication */ 528 if (p->p_lwpid == INT_MAX) { 529 p->p_flag |= SLWPWRAP; 530 p->p_lwpid = 1; 531 } 532 if ((t->t_tid = ++p->p_lwpid) == prev_id) { 533 /* 534 * All lwpids are allocated; fail the request. 535 */ 536 err = 1; 537 goto error; 538 } 539 /* 540 * We only need to worry about colliding with an id 541 * that's already in use if this process has 542 * cycled through all available lwp ids. 543 */ 544 if ((p->p_flag & SLWPWRAP) == 0) 545 break; 546 } while (lwp_hash_lookup(p, t->t_tid) != NULL); 547 } 548 549 /* 550 * If this is a branded process, let the brand do any necessary lwp 551 * initialization. 552 */ 553 if (PROC_IS_BRANDED(p)) { 554 if (BROP(p)->b_initlwp(lwp)) { 555 err = 1; 556 goto error; 557 } 558 branded = 1; 559 } 560 561 if (t->t_tid == 1) { 562 kpreempt_disable(); 563 ASSERT(t->t_lpl != NULL); 564 p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid; 565 kpreempt_enable(); 566 if (p->p_tr_lgrpid != LGRP_NONE && 567 p->p_tr_lgrpid != p->p_t1_lgrpid) { 568 lgrp_update_trthr_migrations(1); 569 } 570 } 571 572 p->p_lwpcnt++; 573 t->t_waitfor = -1; 574 575 /* 576 * Turn microstate accounting on for thread if on for process. 577 */ 578 if (p->p_flag & SMSACCT) 579 t->t_proc_flag |= TP_MSACCT; 580 581 /* 582 * If the process has watchpoints, mark the new thread as such. 583 */ 584 if (pr_watch_active(p)) 585 watch_enable(t); 586 587 /* 588 * The lwp is being created in the stopped state. 589 * We set all the necessary flags to indicate that fact here. 590 * We omit the TS_CREATE flag from t_schedflag so that the lwp 591 * cannot be set running until the caller is finished with it, 592 * even if lwp_continue() is called on it after we drop p->p_lock. 593 * When the caller is finished with the newly-created lwp, 594 * the caller must call lwp_create_done() to allow the lwp 595 * to be set running. If the TP_HOLDLWP is left set, the 596 * lwp will suspend itself after reaching system call exit. 597 */ 598 init_mstate(t, LMS_STOPPED); 599 t->t_proc_flag |= TP_HOLDLWP; 600 t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE)); 601 t->t_whystop = PR_SUSPENDED; 602 t->t_whatstop = SUSPEND_NORMAL; 603 t->t_sig_check = 1; /* ensure that TP_HOLDLWP is honored */ 604 605 /* 606 * Set system call processing flags in case tracing or profiling 607 * is set. The first system call will evaluate these and turn 608 * them off if they aren't needed. 609 */ 610 t->t_pre_sys = 1; 611 t->t_post_sys = 1; 612 613 /* 614 * Insert the new thread into the list of all threads. 615 */ 616 if ((tx = p->p_tlist) == NULL) { 617 t->t_back = t; 618 t->t_forw = t; 619 p->p_tlist = t; 620 } else { 621 t->t_forw = tx; 622 t->t_back = tx->t_back; 623 tx->t_back->t_forw = t; 624 tx->t_back = t; 625 } 626 627 /* 628 * Insert the new lwp into an lwp directory slot position 629 * and into the lwpid hash table. 630 */ 631 lep->le_thread = t; 632 lep->le_lwpid = t->t_tid; 633 lep->le_start = t->t_start; 634 lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1); 635 636 if (state == TS_RUN) { 637 /* 638 * We set the new lwp running immediately. 639 */ 640 t->t_proc_flag &= ~TP_HOLDLWP; 641 lwp_create_done(t); 642 } 643 644 error: 645 if (err) { 646 /* 647 * We have failed to create an lwp, so decrement the number 648 * of lwps in the task and let the lgroup load averages know 649 * that this thread isn't going to show up. 650 */ 651 kpreempt_disable(); 652 lgrp_move_thread(t, NULL, 1); 653 kpreempt_enable(); 654 655 ASSERT(MUTEX_HELD(&p->p_lock)); 656 mutex_enter(&p->p_zone->zone_nlwps_lock); 657 p->p_task->tk_nlwps--; 658 p->p_task->tk_proj->kpj_nlwps--; 659 p->p_zone->zone_nlwps--; 660 mutex_exit(&p->p_zone->zone_nlwps_lock); 661 if (cid != NOCLASS && bufp != NULL) 662 CL_FREE(cid, bufp); 663 664 if (branded) 665 BROP(p)->b_freelwp(lwp); 666 667 mutex_exit(&p->p_lock); 668 t->t_state = TS_FREE; 669 thread_rele(t); 670 671 /* 672 * We need to remove t from the list of all threads 673 * because thread_exit()/lwp_exit() isn't called on t. 674 */ 675 mutex_enter(&pidlock); 676 ASSERT(t != t->t_next); /* t0 never exits */ 677 t->t_next->t_prev = t->t_prev; 678 t->t_prev->t_next = t->t_next; 679 mutex_exit(&pidlock); 680 681 thread_free(t); 682 kmem_free(lep, sizeof (*lep)); 683 lwp = NULL; 684 } else { 685 mutex_exit(&p->p_lock); 686 } 687 688 if (old_dir != NULL) 689 kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 690 if (old_hash != NULL) 691 kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 692 if (ret_tidhash != NULL) 693 kmem_free(ret_tidhash, sizeof (ret_tidhash_t)); 694 695 DTRACE_PROC1(lwp__create, kthread_t *, t); 696 return (lwp); 697 } 698 699 /* 700 * lwp_create_done() is called by the caller of lwp_create() to set the 701 * newly-created lwp running after the caller has finished manipulating it. 702 */ 703 void 704 lwp_create_done(kthread_t *t) 705 { 706 proc_t *p = ttoproc(t); 707 708 ASSERT(MUTEX_HELD(&p->p_lock)); 709 710 /* 711 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked(). 712 * (The absence of the TS_CREATE flag prevents the lwp from running 713 * until we are finished with it, even if lwp_continue() is called on 714 * it by some other lwp in the process or elsewhere in the kernel.) 715 */ 716 thread_lock(t); 717 ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE)); 718 /* 719 * If TS_CSTART is set, lwp_continue(t) has been called and 720 * has already incremented p_lwprcnt; avoid doing this twice. 721 */ 722 if (!(t->t_schedflag & TS_CSTART)) 723 p->p_lwprcnt++; 724 t->t_schedflag |= (TS_CSTART | TS_CREATE); 725 setrun_locked(t); 726 thread_unlock(t); 727 } 728 729 /* 730 * Copy an LWP's active templates, and clear the latest contracts. 731 */ 732 void 733 lwp_ctmpl_copy(klwp_t *dst, klwp_t *src) 734 { 735 int i; 736 737 for (i = 0; i < ct_ntypes; i++) { 738 dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]); 739 dst->lwp_ct_latest[i] = NULL; 740 } 741 } 742 743 /* 744 * Clear an LWP's contract template state. 745 */ 746 void 747 lwp_ctmpl_clear(klwp_t *lwp) 748 { 749 ct_template_t *tmpl; 750 int i; 751 752 for (i = 0; i < ct_ntypes; i++) { 753 if ((tmpl = lwp->lwp_ct_active[i]) != NULL) { 754 ctmpl_free(tmpl); 755 lwp->lwp_ct_active[i] = NULL; 756 } 757 758 if (lwp->lwp_ct_latest[i] != NULL) { 759 contract_rele(lwp->lwp_ct_latest[i]); 760 lwp->lwp_ct_latest[i] = NULL; 761 } 762 } 763 } 764 765 /* 766 * Individual lwp exit. 767 * If this is the last lwp, exit the whole process. 768 */ 769 void 770 lwp_exit(void) 771 { 772 kthread_t *t = curthread; 773 klwp_t *lwp = ttolwp(t); 774 proc_t *p = ttoproc(t); 775 776 ASSERT(MUTEX_HELD(&p->p_lock)); 777 778 mutex_exit(&p->p_lock); 779 780 #if defined(__sparc) 781 /* 782 * Ensure that the user stack is fully abandoned.. 783 */ 784 trash_user_windows(); 785 #endif 786 787 tsd_exit(); /* free thread specific data */ 788 789 kcpc_passivate(); /* Clean up performance counter state */ 790 791 pollcleanup(); 792 793 if (t->t_door) 794 door_slam(); 795 796 if (t->t_schedctl != NULL) 797 schedctl_lwp_cleanup(t); 798 799 if (t->t_upimutex != NULL) 800 upimutex_cleanup(); 801 802 /* 803 * Perform any brand specific exit processing, then release any 804 * brand data associated with the lwp 805 */ 806 if (PROC_IS_BRANDED(p)) 807 BROP(p)->b_lwpexit(lwp); 808 809 mutex_enter(&p->p_lock); 810 lwp_cleanup(); 811 812 /* 813 * When this process is dumping core, its lwps are held here 814 * until the core dump is finished. Then exitlwps() is called 815 * again to release these lwps so that they can finish exiting. 816 */ 817 if (p->p_flag & SCOREDUMP) 818 stop(PR_SUSPENDED, SUSPEND_NORMAL); 819 820 /* 821 * Block the process against /proc now that we have really acquired 822 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least). 823 */ 824 prbarrier(p); 825 826 /* 827 * Call proc_exit() if this is the last non-daemon lwp in the process. 828 */ 829 if (!(t->t_proc_flag & TP_DAEMON) && 830 p->p_lwpcnt == p->p_lwpdaemon + 1) { 831 mutex_exit(&p->p_lock); 832 if (proc_exit(CLD_EXITED, 0) == 0) { 833 /* Restarting init. */ 834 return; 835 } 836 837 /* 838 * proc_exit() returns a non-zero value when some other 839 * lwp got there first. We just have to continue in 840 * lwp_exit(). 841 */ 842 mutex_enter(&p->p_lock); 843 ASSERT(curproc->p_flag & SEXITLWPS); 844 prbarrier(p); 845 } 846 847 DTRACE_PROC(lwp__exit); 848 849 /* 850 * If the lwp is a detached lwp or if the process is exiting, 851 * remove (lwp_hash_out()) the lwp from the lwp directory. 852 * Otherwise null out the lwp's le_thread pointer in the lwp 853 * directory so that other threads will see it as a zombie lwp. 854 */ 855 prlwpexit(t); /* notify /proc */ 856 if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS)) 857 lwp_hash_out(p, t->t_tid); 858 else { 859 ASSERT(!(t->t_proc_flag & TP_DAEMON)); 860 p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL; 861 p->p_zombcnt++; 862 cv_broadcast(&p->p_lwpexit); 863 } 864 if (t->t_proc_flag & TP_DAEMON) { 865 p->p_lwpdaemon--; 866 t->t_proc_flag &= ~TP_DAEMON; 867 } 868 t->t_proc_flag &= ~TP_TWAIT; 869 870 /* 871 * Maintain accurate lwp count for task.max-lwps resource control. 872 */ 873 mutex_enter(&p->p_zone->zone_nlwps_lock); 874 p->p_task->tk_nlwps--; 875 p->p_task->tk_proj->kpj_nlwps--; 876 p->p_zone->zone_nlwps--; 877 mutex_exit(&p->p_zone->zone_nlwps_lock); 878 879 CL_EXIT(t); /* tell the scheduler that t is exiting */ 880 ASSERT(p->p_lwpcnt != 0); 881 p->p_lwpcnt--; 882 883 /* 884 * If all remaining non-daemon lwps are waiting in lwp_wait(), 885 * wake them up so someone can return EDEADLK. 886 * (See the block comment preceeding lwp_wait().) 887 */ 888 if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait)) 889 cv_broadcast(&p->p_lwpexit); 890 891 t->t_proc_flag |= TP_LWPEXIT; 892 term_mstate(t); 893 894 #ifndef NPROBE 895 /* Kernel probe */ 896 if (t->t_tnf_tpdp) 897 tnf_thread_exit(); 898 #endif /* NPROBE */ 899 900 t->t_forw->t_back = t->t_back; 901 t->t_back->t_forw = t->t_forw; 902 if (t == p->p_tlist) 903 p->p_tlist = t->t_forw; 904 905 /* 906 * Clean up the signal state. 907 */ 908 if (t->t_sigqueue != NULL) 909 sigdelq(p, t, 0); 910 if (lwp->lwp_curinfo != NULL) { 911 siginfofree(lwp->lwp_curinfo); 912 lwp->lwp_curinfo = NULL; 913 } 914 915 thread_rele(t); 916 917 /* 918 * Terminated lwps are associated with process zero and are put onto 919 * death-row by resume(). Avoid preemption after resetting t->t_procp. 920 */ 921 t->t_preempt++; 922 923 if (t->t_ctx != NULL) 924 exitctx(t); 925 if (p->p_pctx != NULL) 926 exitpctx(p); 927 928 t->t_procp = &p0; 929 930 /* 931 * Notify the HAT about the change of address space 932 */ 933 hat_thread_exit(t); 934 /* 935 * When this is the last running lwp in this process and some lwp is 936 * waiting for this condition to become true, or this thread was being 937 * suspended, then the waiting lwp is awakened. 938 * 939 * Also, if the process is exiting, we may have a thread waiting in 940 * exitlwps() that needs to be notified. 941 */ 942 if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) || 943 (p->p_flag & SEXITLWPS)) 944 cv_broadcast(&p->p_holdlwps); 945 946 /* 947 * Need to drop p_lock so we can reacquire pidlock. 948 */ 949 mutex_exit(&p->p_lock); 950 mutex_enter(&pidlock); 951 952 ASSERT(t != t->t_next); /* t0 never exits */ 953 t->t_next->t_prev = t->t_prev; 954 t->t_prev->t_next = t->t_next; 955 cv_broadcast(&t->t_joincv); /* wake up anyone in thread_join */ 956 mutex_exit(&pidlock); 957 958 lwp_pcb_exit(); 959 960 t->t_state = TS_ZOMB; 961 swtch_from_zombie(); 962 /* never returns */ 963 } 964 965 966 /* 967 * Cleanup function for an exiting lwp. 968 * Called both from lwp_exit() and from proc_exit(). 969 * p->p_lock is repeatedly released and grabbed in this function. 970 */ 971 void 972 lwp_cleanup(void) 973 { 974 kthread_t *t = curthread; 975 proc_t *p = ttoproc(t); 976 977 ASSERT(MUTEX_HELD(&p->p_lock)); 978 979 /* untimeout any lwp-bound realtime timers */ 980 if (p->p_itimer != NULL) 981 timer_lwpexit(); 982 983 /* 984 * If this is the /proc agent lwp that is exiting, readjust p_lwpid 985 * so it appears that the agent never existed, and clear p_agenttp. 986 */ 987 if (t == p->p_agenttp) { 988 ASSERT(t->t_tid == p->p_lwpid); 989 p->p_lwpid--; 990 p->p_agenttp = NULL; 991 } 992 993 /* 994 * Do lgroup bookkeeping to account for thread exiting. 995 */ 996 kpreempt_disable(); 997 lgrp_move_thread(t, NULL, 1); 998 if (t->t_tid == 1) { 999 p->p_t1_lgrpid = LGRP_NONE; 1000 } 1001 kpreempt_enable(); 1002 1003 lwp_ctmpl_clear(ttolwp(t)); 1004 } 1005 1006 int 1007 lwp_suspend(kthread_t *t) 1008 { 1009 int tid; 1010 proc_t *p = ttoproc(t); 1011 1012 ASSERT(MUTEX_HELD(&p->p_lock)); 1013 1014 /* 1015 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp(). 1016 * If an lwp is stopping itself, there is no need to wait. 1017 */ 1018 top: 1019 t->t_proc_flag |= TP_HOLDLWP; 1020 if (t == curthread) { 1021 t->t_sig_check = 1; 1022 } else { 1023 /* 1024 * Make sure the lwp stops promptly. 1025 */ 1026 thread_lock(t); 1027 t->t_sig_check = 1; 1028 /* 1029 * XXX Should use virtual stop like /proc does instead of 1030 * XXX waking the thread to get it to stop. 1031 */ 1032 if (ISWAKEABLE(t) || ISWAITING(t)) { 1033 setrun_locked(t); 1034 } else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) { 1035 poke_cpu(t->t_cpu->cpu_id); 1036 } 1037 1038 tid = t->t_tid; /* remember thread ID */ 1039 /* 1040 * Wait for lwp to stop 1041 */ 1042 while (!SUSPENDED(t)) { 1043 /* 1044 * Drop the thread lock before waiting and reacquire it 1045 * afterwards, so the thread can change its t_state 1046 * field. 1047 */ 1048 thread_unlock(t); 1049 1050 /* 1051 * Check if aborted by exitlwps(). 1052 */ 1053 if (p->p_flag & SEXITLWPS) 1054 lwp_exit(); 1055 1056 /* 1057 * Cooperate with jobcontrol signals and /proc stopping 1058 * by calling cv_wait_sig() to wait for the target 1059 * lwp to stop. Just using cv_wait() can lead to 1060 * deadlock because, if some other lwp has stopped 1061 * by either of these mechanisms, then p_lwprcnt will 1062 * never become zero if we do a cv_wait(). 1063 */ 1064 if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock)) 1065 return (EINTR); 1066 1067 /* 1068 * Check to see if thread died while we were 1069 * waiting for it to suspend. 1070 */ 1071 if (idtot(p, tid) == NULL) 1072 return (ESRCH); 1073 1074 thread_lock(t); 1075 /* 1076 * If the TP_HOLDLWP flag went away, lwp_continue() 1077 * or vfork() must have been called while we were 1078 * waiting, so start over again. 1079 */ 1080 if ((t->t_proc_flag & TP_HOLDLWP) == 0) { 1081 thread_unlock(t); 1082 goto top; 1083 } 1084 } 1085 thread_unlock(t); 1086 } 1087 return (0); 1088 } 1089 1090 /* 1091 * continue a lwp that's been stopped by lwp_suspend(). 1092 */ 1093 void 1094 lwp_continue(kthread_t *t) 1095 { 1096 proc_t *p = ttoproc(t); 1097 int was_suspended = t->t_proc_flag & TP_HOLDLWP; 1098 1099 ASSERT(MUTEX_HELD(&p->p_lock)); 1100 1101 t->t_proc_flag &= ~TP_HOLDLWP; 1102 thread_lock(t); 1103 if (SUSPENDED(t) && 1104 !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) { 1105 p->p_lwprcnt++; 1106 t->t_schedflag |= TS_CSTART; 1107 setrun_locked(t); 1108 } 1109 thread_unlock(t); 1110 /* 1111 * Wakeup anyone waiting for this thread to be suspended 1112 */ 1113 if (was_suspended) 1114 cv_broadcast(&p->p_holdlwps); 1115 } 1116 1117 /* 1118 * ******************************** 1119 * Miscellaneous lwp routines * 1120 * ******************************** 1121 */ 1122 /* 1123 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK. 1124 * This will cause the process's lwps to stop at a hold point. A hold 1125 * point is where a kernel thread has a flat stack. This is at the 1126 * return from a system call and at the return from a user level trap. 1127 * 1128 * When a process is undergoing a fork1() or vfork(), its p_flag is set to 1129 * SHOLDFORK1. This will cause the process's lwps to stop at a modified 1130 * hold point. The lwps in the process are not being cloned, so they 1131 * are held at the usual hold points and also within issig_forreal(). 1132 * This has the side-effect that their system calls do not return 1133 * showing EINTR. 1134 * 1135 * An lwp can also be held. This is identified by the TP_HOLDLWP flag on 1136 * the thread. The TP_HOLDLWP flag is set in lwp_suspend(), where the active 1137 * lwp is waiting for the target lwp to be stopped. 1138 */ 1139 void 1140 holdlwp(void) 1141 { 1142 proc_t *p = curproc; 1143 kthread_t *t = curthread; 1144 1145 mutex_enter(&p->p_lock); 1146 /* 1147 * Don't terminate immediately if the process is dumping core. 1148 * Once the process has dumped core, all lwps are terminated. 1149 */ 1150 if (!(p->p_flag & SCOREDUMP)) { 1151 if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP)) 1152 lwp_exit(); 1153 } 1154 if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) { 1155 mutex_exit(&p->p_lock); 1156 return; 1157 } 1158 /* 1159 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps 1160 * when p->p_lwprcnt becomes zero. 1161 */ 1162 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1163 if (p->p_flag & SEXITLWPS) 1164 lwp_exit(); 1165 mutex_exit(&p->p_lock); 1166 } 1167 1168 /* 1169 * Have all lwps within the process hold at a point where they are 1170 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1). 1171 */ 1172 int 1173 holdlwps(int holdflag) 1174 { 1175 proc_t *p = curproc; 1176 1177 ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1); 1178 mutex_enter(&p->p_lock); 1179 schedctl_finish_sigblock(curthread); 1180 again: 1181 while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 1182 /* 1183 * If another lwp is doing a forkall() or proc_exit(), bail out. 1184 */ 1185 if (p->p_flag & (SEXITLWPS | SHOLDFORK)) { 1186 mutex_exit(&p->p_lock); 1187 return (0); 1188 } 1189 /* 1190 * Another lwp is doing a fork1() or is undergoing 1191 * watchpoint activity. We hold here for it to complete. 1192 */ 1193 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1194 } 1195 p->p_flag |= holdflag; 1196 pokelwps(p); 1197 --p->p_lwprcnt; 1198 /* 1199 * Wait for the process to become quiescent (p->p_lwprcnt == 0). 1200 */ 1201 while (p->p_lwprcnt > 0) { 1202 /* 1203 * Check if aborted by exitlwps(). 1204 * Also check if SHOLDWATCH is set; it takes precedence. 1205 */ 1206 if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) { 1207 p->p_lwprcnt++; 1208 p->p_flag &= ~holdflag; 1209 cv_broadcast(&p->p_holdlwps); 1210 goto again; 1211 } 1212 /* 1213 * Cooperate with jobcontrol signals and /proc stopping. 1214 * If some other lwp has stopped by either of these 1215 * mechanisms, then p_lwprcnt will never become zero 1216 * and the process will appear deadlocked unless we 1217 * stop here in sympathy with the other lwp before 1218 * doing the cv_wait() below. 1219 * 1220 * If the other lwp stops after we do the cv_wait(), it 1221 * will wake us up to loop around and do the sympathy stop. 1222 * 1223 * Since stop() drops p->p_lock, we must start from 1224 * the top again on returning from stop(). 1225 */ 1226 if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) { 1227 int whystop = p->p_stopsig? PR_JOBCONTROL : 1228 PR_REQUESTED; 1229 p->p_lwprcnt++; 1230 p->p_flag &= ~holdflag; 1231 stop(whystop, p->p_stopsig); 1232 goto again; 1233 } 1234 cv_wait(&p->p_holdlwps, &p->p_lock); 1235 } 1236 p->p_lwprcnt++; 1237 p->p_flag &= ~holdflag; 1238 mutex_exit(&p->p_lock); 1239 return (1); 1240 } 1241 1242 /* 1243 * See comments for holdwatch(), below. 1244 */ 1245 static int 1246 holdcheck(int clearflags) 1247 { 1248 proc_t *p = curproc; 1249 1250 /* 1251 * If we are trying to exit, that takes precedence over anything else. 1252 */ 1253 if (p->p_flag & SEXITLWPS) { 1254 p->p_lwprcnt++; 1255 p->p_flag &= ~clearflags; 1256 lwp_exit(); 1257 } 1258 1259 /* 1260 * If another thread is calling fork1(), stop the current thread so the 1261 * other can complete. 1262 */ 1263 if (p->p_flag & SHOLDFORK1) { 1264 p->p_lwprcnt++; 1265 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1266 if (p->p_flag & SEXITLWPS) { 1267 p->p_flag &= ~clearflags; 1268 lwp_exit(); 1269 } 1270 return (-1); 1271 } 1272 1273 /* 1274 * If another thread is calling fork(), then indicate we are doing 1275 * watchpoint activity. This will cause holdlwps() above to stop the 1276 * forking thread, at which point we can continue with watchpoint 1277 * activity. 1278 */ 1279 if (p->p_flag & SHOLDFORK) { 1280 p->p_lwprcnt++; 1281 while (p->p_flag & SHOLDFORK) { 1282 p->p_flag |= SHOLDWATCH; 1283 cv_broadcast(&p->p_holdlwps); 1284 cv_wait(&p->p_holdlwps, &p->p_lock); 1285 p->p_flag &= ~SHOLDWATCH; 1286 } 1287 return (-1); 1288 } 1289 1290 return (0); 1291 } 1292 1293 /* 1294 * Stop all lwps within the process, holding themselves in the kernel while the 1295 * active lwp undergoes watchpoint activity. This is more complicated than 1296 * expected because stop() relies on calling holdwatch() in order to copyin data 1297 * from the user's address space. A double barrier is used to prevent an 1298 * infinite loop. 1299 * 1300 * o The first thread into holdwatch() is the 'master' thread and does 1301 * the following: 1302 * 1303 * - Sets SHOLDWATCH on the current process 1304 * - Sets TP_WATCHSTOP on the current thread 1305 * - Waits for all threads to be either stopped or have 1306 * TP_WATCHSTOP set. 1307 * - Sets the SWATCHOK flag on the process 1308 * - Unsets TP_WATCHSTOP 1309 * - Waits for the other threads to completely stop 1310 * - Unsets SWATCHOK 1311 * 1312 * o If SHOLDWATCH is already set when we enter this function, then another 1313 * thread is already trying to stop this thread. This 'slave' thread 1314 * does the following: 1315 * 1316 * - Sets TP_WATCHSTOP on the current thread 1317 * - Waits for SWATCHOK flag to be set 1318 * - Calls stop() 1319 * 1320 * o If SWATCHOK is set on the process, then this function immediately 1321 * returns, as we must have been called via stop(). 1322 * 1323 * In addition, there are other flags that take precedence over SHOLDWATCH: 1324 * 1325 * o If SEXITLWPS is set, exit immediately. 1326 * 1327 * o If SHOLDFORK1 is set, wait for fork1() to complete. 1328 * 1329 * o If SHOLDFORK is set, then watchpoint activity takes precedence In this 1330 * case, set SHOLDWATCH, signalling the forking thread to stop first. 1331 * 1332 * o If the process is being stopped via /proc (TP_PRSTOP is set), then we 1333 * stop the current thread. 1334 * 1335 * Returns 0 if all threads have been quiesced. Returns non-zero if not all 1336 * threads were stopped, or the list of watched pages has changed. 1337 */ 1338 int 1339 holdwatch(void) 1340 { 1341 proc_t *p = curproc; 1342 kthread_t *t = curthread; 1343 int ret = 0; 1344 1345 mutex_enter(&p->p_lock); 1346 1347 p->p_lwprcnt--; 1348 1349 /* 1350 * Check for bail-out conditions as outlined above. 1351 */ 1352 if (holdcheck(0) != 0) { 1353 mutex_exit(&p->p_lock); 1354 return (-1); 1355 } 1356 1357 if (!(p->p_flag & SHOLDWATCH)) { 1358 /* 1359 * We are the master watchpoint thread. Set SHOLDWATCH and poke 1360 * the other threads. 1361 */ 1362 p->p_flag |= SHOLDWATCH; 1363 pokelwps(p); 1364 1365 /* 1366 * Wait for all threads to be stopped or have TP_WATCHSTOP set. 1367 */ 1368 while (pr_allstopped(p, 1) > 0) { 1369 if (holdcheck(SHOLDWATCH) != 0) { 1370 p->p_flag &= ~SHOLDWATCH; 1371 mutex_exit(&p->p_lock); 1372 return (-1); 1373 } 1374 1375 cv_wait(&p->p_holdlwps, &p->p_lock); 1376 } 1377 1378 /* 1379 * All threads are now stopped or in the process of stopping. 1380 * Set SWATCHOK and let them stop completely. 1381 */ 1382 p->p_flag |= SWATCHOK; 1383 t->t_proc_flag &= ~TP_WATCHSTOP; 1384 cv_broadcast(&p->p_holdlwps); 1385 1386 while (pr_allstopped(p, 0) > 0) { 1387 /* 1388 * At first glance, it may appear that we don't need a 1389 * call to holdcheck() here. But if the process gets a 1390 * SIGKILL signal, one of our stopped threads may have 1391 * been awakened and is waiting in exitlwps(), which 1392 * takes precedence over watchpoints. 1393 */ 1394 if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) { 1395 p->p_flag &= ~(SHOLDWATCH | SWATCHOK); 1396 mutex_exit(&p->p_lock); 1397 return (-1); 1398 } 1399 1400 cv_wait(&p->p_holdlwps, &p->p_lock); 1401 } 1402 1403 /* 1404 * All threads are now completely stopped. 1405 */ 1406 p->p_flag &= ~SWATCHOK; 1407 p->p_flag &= ~SHOLDWATCH; 1408 p->p_lwprcnt++; 1409 1410 } else if (!(p->p_flag & SWATCHOK)) { 1411 1412 /* 1413 * SHOLDWATCH is set, so another thread is trying to do 1414 * watchpoint activity. Indicate this thread is stopping, and 1415 * wait for the OK from the master thread. 1416 */ 1417 t->t_proc_flag |= TP_WATCHSTOP; 1418 cv_broadcast(&p->p_holdlwps); 1419 1420 while (!(p->p_flag & SWATCHOK)) { 1421 if (holdcheck(0) != 0) { 1422 t->t_proc_flag &= ~TP_WATCHSTOP; 1423 mutex_exit(&p->p_lock); 1424 return (-1); 1425 } 1426 1427 cv_wait(&p->p_holdlwps, &p->p_lock); 1428 } 1429 1430 /* 1431 * Once the master thread has given the OK, this thread can 1432 * actually call stop(). 1433 */ 1434 t->t_proc_flag &= ~TP_WATCHSTOP; 1435 p->p_lwprcnt++; 1436 1437 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1438 1439 /* 1440 * It's not OK to do watchpoint activity, notify caller to 1441 * retry. 1442 */ 1443 ret = -1; 1444 1445 } else { 1446 1447 /* 1448 * The only way we can hit the case where SHOLDWATCH is set and 1449 * SWATCHOK is set is if we are triggering this from within a 1450 * stop() call. Assert that this is the case. 1451 */ 1452 1453 ASSERT(t->t_proc_flag & TP_STOPPING); 1454 p->p_lwprcnt++; 1455 } 1456 1457 mutex_exit(&p->p_lock); 1458 1459 return (ret); 1460 } 1461 1462 /* 1463 * force all interruptible lwps to trap into the kernel. 1464 */ 1465 void 1466 pokelwps(proc_t *p) 1467 { 1468 kthread_t *t; 1469 1470 ASSERT(MUTEX_HELD(&p->p_lock)); 1471 1472 t = p->p_tlist; 1473 do { 1474 if (t == curthread) 1475 continue; 1476 thread_lock(t); 1477 aston(t); /* make thread trap or do post_syscall */ 1478 if (ISWAKEABLE(t) || ISWAITING(t)) { 1479 setrun_locked(t); 1480 } else if (t->t_state == TS_STOPPED) { 1481 /* 1482 * Ensure that proc_exit() is not blocked by lwps 1483 * that were stopped via jobcontrol or /proc. 1484 */ 1485 if (p->p_flag & SEXITLWPS) { 1486 p->p_stopsig = 0; 1487 t->t_schedflag |= (TS_XSTART | TS_PSTART); 1488 setrun_locked(t); 1489 } 1490 /* 1491 * If we are holding lwps for a forkall(), 1492 * force lwps that have been suspended via 1493 * lwp_suspend() and are suspended inside 1494 * of a system call to proceed to their 1495 * holdlwp() points where they are clonable. 1496 */ 1497 if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) { 1498 if ((t->t_schedflag & TS_CSTART) == 0) { 1499 p->p_lwprcnt++; 1500 t->t_schedflag |= TS_CSTART; 1501 setrun_locked(t); 1502 } 1503 } 1504 } else if (t->t_state == TS_ONPROC) { 1505 if (t->t_cpu != CPU) 1506 poke_cpu(t->t_cpu->cpu_id); 1507 } 1508 thread_unlock(t); 1509 } while ((t = t->t_forw) != p->p_tlist); 1510 } 1511 1512 /* 1513 * undo the effects of holdlwps() or holdwatch(). 1514 */ 1515 void 1516 continuelwps(proc_t *p) 1517 { 1518 kthread_t *t; 1519 1520 /* 1521 * If this flag is set, then the original holdwatch() didn't actually 1522 * stop the process. See comments for holdwatch(). 1523 */ 1524 if (p->p_flag & SWATCHOK) { 1525 ASSERT(curthread->t_proc_flag & TP_STOPPING); 1526 return; 1527 } 1528 1529 ASSERT(MUTEX_HELD(&p->p_lock)); 1530 ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0); 1531 1532 t = p->p_tlist; 1533 do { 1534 thread_lock(t); /* SUSPENDED looks at t_schedflag */ 1535 if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) { 1536 p->p_lwprcnt++; 1537 t->t_schedflag |= TS_CSTART; 1538 setrun_locked(t); 1539 } 1540 thread_unlock(t); 1541 } while ((t = t->t_forw) != p->p_tlist); 1542 } 1543 1544 /* 1545 * Force all other LWPs in the current process other than the caller to exit, 1546 * and then cv_wait() on p_holdlwps for them to exit. The exitlwps() function 1547 * is typically used in these situations: 1548 * 1549 * (a) prior to an exec() system call 1550 * (b) prior to dumping a core file 1551 * (c) prior to a uadmin() shutdown 1552 * 1553 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed. 1554 * Multiple threads in the process can call this function at one time by 1555 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used 1556 * to declare one particular thread the winner who gets to kill the others. 1557 * If a thread wins the exitlwps() dance, zero is returned; otherwise an 1558 * appropriate errno value is returned to caller for its system call to return. 1559 */ 1560 int 1561 exitlwps(int coredump) 1562 { 1563 proc_t *p = curproc; 1564 int heldcnt; 1565 1566 if (curthread->t_door) 1567 door_slam(); 1568 if (p->p_door_list) 1569 door_revoke_all(); 1570 if (curthread->t_schedctl != NULL) 1571 schedctl_lwp_cleanup(curthread); 1572 1573 /* 1574 * Ensure that before starting to wait for other lwps to exit, 1575 * cleanup all upimutexes held by curthread. Otherwise, some other 1576 * lwp could be waiting (uninterruptibly) for a upimutex held by 1577 * curthread, and the call to pokelwps() below would deadlock. 1578 * Even if a blocked upimutex_lock is made interruptible, 1579 * curthread's upimutexes need to be unlocked: do it here. 1580 */ 1581 if (curthread->t_upimutex != NULL) 1582 upimutex_cleanup(); 1583 1584 /* 1585 * Grab p_lock in order to check and set SEXITLWPS to declare a winner. 1586 * We must also block any further /proc access from this point forward. 1587 */ 1588 mutex_enter(&p->p_lock); 1589 prbarrier(p); 1590 1591 if (p->p_flag & SEXITLWPS) { 1592 mutex_exit(&p->p_lock); 1593 aston(curthread); /* force a trip through post_syscall */ 1594 return (set_errno(EINTR)); 1595 } 1596 1597 p->p_flag |= SEXITLWPS; 1598 if (coredump) /* tell other lwps to stop, not exit */ 1599 p->p_flag |= SCOREDUMP; 1600 1601 /* 1602 * Give precedence to exitlwps() if a holdlwps() is 1603 * in progress. The lwp doing the holdlwps() operation 1604 * is aborted when it is awakened. 1605 */ 1606 while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 1607 cv_broadcast(&p->p_holdlwps); 1608 cv_wait(&p->p_holdlwps, &p->p_lock); 1609 prbarrier(p); 1610 } 1611 p->p_flag |= SHOLDFORK; 1612 pokelwps(p); 1613 1614 /* 1615 * Wait for process to become quiescent. 1616 */ 1617 --p->p_lwprcnt; 1618 while (p->p_lwprcnt > 0) { 1619 cv_wait(&p->p_holdlwps, &p->p_lock); 1620 prbarrier(p); 1621 } 1622 p->p_lwprcnt++; 1623 ASSERT(p->p_lwprcnt == 1); 1624 1625 /* 1626 * The SCOREDUMP flag puts the process into a quiescent 1627 * state. The process's lwps remain attached to this 1628 * process until exitlwps() is called again without the 1629 * 'coredump' flag set, then the lwps are terminated 1630 * and the process can exit. 1631 */ 1632 if (coredump) { 1633 p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS); 1634 goto out; 1635 } 1636 1637 /* 1638 * Determine if there are any lwps left dangling in 1639 * the stopped state. This happens when exitlwps() 1640 * aborts a holdlwps() operation. 1641 */ 1642 p->p_flag &= ~SHOLDFORK; 1643 if ((heldcnt = p->p_lwpcnt) > 1) { 1644 kthread_t *t; 1645 for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) { 1646 t->t_proc_flag &= ~TP_TWAIT; 1647 lwp_continue(t); 1648 } 1649 } 1650 1651 /* 1652 * Wait for all other lwps to exit. 1653 */ 1654 --p->p_lwprcnt; 1655 while (p->p_lwpcnt > 1) { 1656 cv_wait(&p->p_holdlwps, &p->p_lock); 1657 prbarrier(p); 1658 } 1659 ++p->p_lwprcnt; 1660 ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1); 1661 1662 p->p_flag &= ~SEXITLWPS; 1663 curthread->t_proc_flag &= ~TP_TWAIT; 1664 1665 out: 1666 if (!coredump && p->p_zombcnt) { /* cleanup the zombie lwps */ 1667 lwpdir_t *ldp; 1668 lwpent_t *lep; 1669 int i; 1670 1671 for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { 1672 lep = ldp->ld_entry; 1673 if (lep != NULL && lep->le_thread != curthread) { 1674 ASSERT(lep->le_thread == NULL); 1675 p->p_zombcnt--; 1676 lwp_hash_out(p, lep->le_lwpid); 1677 } 1678 } 1679 ASSERT(p->p_zombcnt == 0); 1680 } 1681 1682 /* 1683 * If some other LWP in the process wanted us to suspend ourself, 1684 * then we will not do it. The other LWP is now terminated and 1685 * no one will ever continue us again if we suspend ourself. 1686 */ 1687 curthread->t_proc_flag &= ~TP_HOLDLWP; 1688 p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP); 1689 mutex_exit(&p->p_lock); 1690 return (0); 1691 } 1692 1693 /* 1694 * duplicate a lwp. 1695 */ 1696 klwp_t * 1697 forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid) 1698 { 1699 klwp_t *clwp; 1700 void *tregs, *tfpu; 1701 kthread_t *t = lwptot(lwp); 1702 kthread_t *ct; 1703 proc_t *p = lwptoproc(lwp); 1704 int cid; 1705 void *bufp; 1706 void *brand_data; 1707 int val; 1708 1709 ASSERT(p == curproc); 1710 ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0)); 1711 1712 #if defined(__sparc) 1713 if (t == curthread) 1714 (void) flush_user_windows_to_stack(NULL); 1715 #endif 1716 1717 if (t == curthread) 1718 /* copy args out of registers first */ 1719 (void) save_syscall_args(); 1720 1721 clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt, 1722 NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid); 1723 if (clwp == NULL) 1724 return (NULL); 1725 1726 /* 1727 * most of the parent's lwp can be copied to its duplicate, 1728 * except for the fields that are unique to each lwp, like 1729 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap. 1730 */ 1731 ct = clwp->lwp_thread; 1732 tregs = clwp->lwp_regs; 1733 tfpu = clwp->lwp_fpu; 1734 brand_data = clwp->lwp_brand; 1735 1736 /* 1737 * Copy parent lwp to child lwp. Hold child's p_lock to prevent 1738 * mstate_aggr_state() from reading stale mstate entries copied 1739 * from lwp to clwp. 1740 */ 1741 mutex_enter(&cp->p_lock); 1742 *clwp = *lwp; 1743 1744 /* clear microstate and resource usage data in new lwp */ 1745 init_mstate(ct, LMS_STOPPED); 1746 bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru)); 1747 mutex_exit(&cp->p_lock); 1748 1749 /* fix up child's lwp */ 1750 1751 clwp->lwp_pcb.pcb_flags = 0; 1752 #if defined(__sparc) 1753 clwp->lwp_pcb.pcb_step = STEP_NONE; 1754 #endif 1755 clwp->lwp_cursig = 0; 1756 clwp->lwp_extsig = 0; 1757 clwp->lwp_curinfo = (struct sigqueue *)0; 1758 clwp->lwp_thread = ct; 1759 ct->t_sysnum = t->t_sysnum; 1760 clwp->lwp_regs = tregs; 1761 clwp->lwp_fpu = tfpu; 1762 clwp->lwp_brand = brand_data; 1763 clwp->lwp_ap = clwp->lwp_arg; 1764 clwp->lwp_procp = cp; 1765 bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer)); 1766 clwp->lwp_lastfault = 0; 1767 clwp->lwp_lastfaddr = 0; 1768 1769 /* copy parent's struct regs to child. */ 1770 lwp_forkregs(lwp, clwp); 1771 1772 /* 1773 * Fork thread context ops, if any. 1774 */ 1775 if (t->t_ctx) 1776 forkctx(t, ct); 1777 1778 /* fix door state in the child */ 1779 if (t->t_door) 1780 door_fork(t, ct); 1781 1782 /* copy current contract templates, clear latest contracts */ 1783 lwp_ctmpl_copy(clwp, lwp); 1784 1785 mutex_enter(&cp->p_lock); 1786 /* lwp_create() set the TP_HOLDLWP flag */ 1787 if (!(t->t_proc_flag & TP_HOLDLWP)) 1788 ct->t_proc_flag &= ~TP_HOLDLWP; 1789 if (cp->p_flag & SMSACCT) 1790 ct->t_proc_flag |= TP_MSACCT; 1791 mutex_exit(&cp->p_lock); 1792 1793 /* Allow brand to propagate brand-specific state */ 1794 if (PROC_IS_BRANDED(p)) 1795 BROP(p)->b_forklwp(lwp, clwp); 1796 1797 retry: 1798 cid = t->t_cid; 1799 1800 val = CL_ALLOC(&bufp, cid, KM_SLEEP); 1801 ASSERT(val == 0); 1802 1803 mutex_enter(&p->p_lock); 1804 if (cid != t->t_cid) { 1805 /* 1806 * Someone just changed this thread's scheduling class, 1807 * so try pre-allocating the buffer again. Hopefully we 1808 * don't hit this often. 1809 */ 1810 mutex_exit(&p->p_lock); 1811 CL_FREE(cid, bufp); 1812 goto retry; 1813 } 1814 1815 ct->t_unpark = t->t_unpark; 1816 ct->t_clfuncs = t->t_clfuncs; 1817 CL_FORK(t, ct, bufp); 1818 ct->t_cid = t->t_cid; /* after data allocated so prgetpsinfo works */ 1819 mutex_exit(&p->p_lock); 1820 1821 return (clwp); 1822 } 1823 1824 /* 1825 * Add a new lwp entry to the lwp directory and to the lwpid hash table. 1826 */ 1827 void 1828 lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz, 1829 int do_lock) 1830 { 1831 tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)]; 1832 lwpdir_t **ldpp; 1833 lwpdir_t *ldp; 1834 kthread_t *t; 1835 1836 /* 1837 * Allocate a directory element from the free list. 1838 * Code elsewhere guarantees a free slot. 1839 */ 1840 ldp = p->p_lwpfree; 1841 p->p_lwpfree = ldp->ld_next; 1842 ASSERT(ldp->ld_entry == NULL); 1843 ldp->ld_entry = lep; 1844 1845 if (do_lock) 1846 mutex_enter(&thp->th_lock); 1847 1848 /* 1849 * Insert it into the lwpid hash table. 1850 */ 1851 ldpp = &thp->th_list; 1852 ldp->ld_next = *ldpp; 1853 *ldpp = ldp; 1854 1855 /* 1856 * Set the active thread's directory slot entry. 1857 */ 1858 if ((t = lep->le_thread) != NULL) { 1859 ASSERT(lep->le_lwpid == t->t_tid); 1860 t->t_dslot = (int)(ldp - p->p_lwpdir); 1861 } 1862 1863 if (do_lock) 1864 mutex_exit(&thp->th_lock); 1865 } 1866 1867 /* 1868 * Remove an lwp from the lwpid hash table and free its directory entry. 1869 * This is done when a detached lwp exits in lwp_exit() or 1870 * when a non-detached lwp is waited for in lwp_wait() or 1871 * when a zombie lwp is detached in lwp_detach(). 1872 */ 1873 void 1874 lwp_hash_out(proc_t *p, id_t lwpid) 1875 { 1876 tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 1877 lwpdir_t **ldpp; 1878 lwpdir_t *ldp; 1879 lwpent_t *lep; 1880 1881 mutex_enter(&thp->th_lock); 1882 for (ldpp = &thp->th_list; 1883 (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) { 1884 lep = ldp->ld_entry; 1885 if (lep->le_lwpid == lwpid) { 1886 prlwpfree(p, lep); /* /proc deals with le_trace */ 1887 *ldpp = ldp->ld_next; 1888 ldp->ld_entry = NULL; 1889 ldp->ld_next = p->p_lwpfree; 1890 p->p_lwpfree = ldp; 1891 kmem_free(lep, sizeof (*lep)); 1892 break; 1893 } 1894 } 1895 mutex_exit(&thp->th_lock); 1896 } 1897 1898 /* 1899 * Lookup an lwp in the lwpid hash table by lwpid. 1900 */ 1901 lwpdir_t * 1902 lwp_hash_lookup(proc_t *p, id_t lwpid) 1903 { 1904 tidhash_t *thp; 1905 lwpdir_t *ldp; 1906 1907 /* 1908 * The process may be exiting, after p_tidhash has been set to NULL in 1909 * proc_exit() but before prfee() has been called. Return failure in 1910 * this case. 1911 */ 1912 if (p->p_tidhash == NULL) 1913 return (NULL); 1914 1915 thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 1916 for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 1917 if (ldp->ld_entry->le_lwpid == lwpid) 1918 return (ldp); 1919 } 1920 1921 return (NULL); 1922 } 1923 1924 /* 1925 * Same as lwp_hash_lookup(), but acquire and return 1926 * the tid hash table entry lock on success. 1927 */ 1928 lwpdir_t * 1929 lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp) 1930 { 1931 tidhash_t *tidhash; 1932 uint_t tidhash_sz; 1933 tidhash_t *thp; 1934 lwpdir_t *ldp; 1935 1936 top: 1937 tidhash_sz = p->p_tidhash_sz; 1938 membar_consumer(); 1939 if ((tidhash = p->p_tidhash) == NULL) 1940 return (NULL); 1941 1942 thp = &tidhash[TIDHASH(lwpid, tidhash_sz)]; 1943 mutex_enter(&thp->th_lock); 1944 1945 /* 1946 * Since we are not holding p->p_lock, the tid hash table 1947 * may have changed. If so, start over. If not, then 1948 * it cannot change until after we drop &thp->th_lock; 1949 */ 1950 if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) { 1951 mutex_exit(&thp->th_lock); 1952 goto top; 1953 } 1954 1955 for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 1956 if (ldp->ld_entry->le_lwpid == lwpid) { 1957 *mpp = &thp->th_lock; 1958 return (ldp); 1959 } 1960 } 1961 1962 mutex_exit(&thp->th_lock); 1963 return (NULL); 1964 } 1965 1966 /* 1967 * Update the indicated LWP usage statistic for the current LWP. 1968 */ 1969 void 1970 lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc) 1971 { 1972 klwp_t *lwp = ttolwp(curthread); 1973 1974 if (lwp == NULL) 1975 return; 1976 1977 switch (lwp_stat_id) { 1978 case LWP_STAT_INBLK: 1979 lwp->lwp_ru.inblock += inc; 1980 break; 1981 case LWP_STAT_OUBLK: 1982 lwp->lwp_ru.oublock += inc; 1983 break; 1984 case LWP_STAT_MSGRCV: 1985 lwp->lwp_ru.msgrcv += inc; 1986 break; 1987 case LWP_STAT_MSGSND: 1988 lwp->lwp_ru.msgsnd += inc; 1989 break; 1990 default: 1991 panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id); 1992 } 1993 } 1994