/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/vmparam.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/door.h>
#include <vm/seg_kp.h>
#include <sys/debug.h>
#include <sys/tnf.h>
#include <sys/schedctl.h>
#include <sys/poll.h>
#include <sys/copyops.h>
#include <sys/lwp_upimutex_impl.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/brand.h>
#include <sys/cyclic.h>
#include <sys/pool.h>

/* hash function for the lwpid hash table, p->p_tidhash[] */
#define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))

void *segkp_lwp;		/* cookie for pool of segkp resources */
extern void reapq_move_lq_to_tq(kthread_t *);
extern void freectx_ctx(struct ctxop *);

/*
 * Create a kernel thread associated with a particular system process.  Give
 * it an LWP so that microstate accounting will be available for it.
 */
kthread_t *
lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
{
	klwp_t *lwp;

	VERIFY((p->p_flag & SSYS) != 0);

	lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);

	VERIFY(lwp != NULL);

	return (lwptot(lwp));
}

/*
 * Create a thread that appears to be stopped at sys_rtt.
 */
klwp_t *
lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
    int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
{
	klwp_t *lwp = NULL;
	kthread_t *t;
	kthread_t *tx;
	cpupart_t *oldpart = NULL;
	size_t stksize;
	caddr_t lwpdata = NULL;
	processorid_t binding;
	int err = 0;
	kproject_t *oldkpj, *newkpj;
	void *bufp = NULL;
	klwp_t *curlwp;
	lwpent_t *lep;
	lwpdir_t *old_dir = NULL;
	uint_t old_dirsz = 0;
	tidhash_t *old_hash = NULL;
	uint_t old_hashsz = 0;
	ret_tidhash_t *ret_tidhash = NULL;
	int i;
	int rctlfail = 0;
	boolean_t branded = 0;
	struct ctxop *ctx = NULL;

	ASSERT(cid != sysdccid);	/* system threads must start in SYS */

	ASSERT(p != &p0);		/* No new LWPs in p0. */
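
	/*
	 * The new lwp is charged against its task, project and zone up
	 * front; every failure path below must undo these increments
	 * before returning NULL.
	 */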

	mutex_enter(&p->p_lock);
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	/*
	 * don't enforce rctl limits on system processes
	 */
	if (!CLASS_KERNEL(cid)) {
		if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
			if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
		if (p->p_task->tk_proj->kpj_nlwps >=
		    p->p_task->tk_proj->kpj_nlwps_ctl)
			if (rctl_test(rc_project_nlwps,
			    p->p_task->tk_proj->kpj_rctls, p, 1, 0)
			    & RCT_DENY)
				rctlfail = 1;
		if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
			if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
	}
	if (rctlfail) {
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		mutex_exit(&p->p_lock);
		return (NULL);
	}
	p->p_task->tk_nlwps++;
	p->p_task->tk_proj->kpj_nlwps++;
	p->p_zone->zone_nlwps++;
	mutex_exit(&p->p_zone->zone_nlwps_lock);
	mutex_exit(&p->p_lock);

	if (CLASS_KERNEL(cid)) {
		curlwp = NULL;		/* don't inherit from curlwp */
		stksize = lwp_default_stksize;
	} else {
		curlwp = ttolwp(curthread);
		if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
			stksize = lwp_default_stksize;
	}

	/*
	 * For system threads, we sleep for our swap reservation, and the
	 * thread stack can't be swapped.
	 *
	 * Otherwise, try to reclaim a <lwp,stack> from 'deathrow'
	 */
	if (CLASS_KERNEL(cid)) {
		lwpdata = (caddr_t)segkp_get(segkp, stksize,
		    (KPD_NO_ANON | KPD_HASREDZONE | KPD_LOCKED));

	} else if (stksize == lwp_default_stksize) {
		if (lwp_reapcnt > 0) {
			mutex_enter(&reaplock);
			if ((t = lwp_deathrow) != NULL) {
				ASSERT(t->t_swap);
				lwp_deathrow = t->t_forw;
				lwp_reapcnt--;
				lwpdata = t->t_swap;
				lwp = t->t_lwp;
				ctx = t->t_ctx;
				t->t_swap = NULL;
				t->t_lwp = NULL;
				t->t_ctx = NULL;
				reapq_move_lq_to_tq(t);
			}
			mutex_exit(&reaplock);
			if (lwp != NULL) {
				lwp_stk_fini(lwp);
			}
			if (ctx != NULL) {
				freectx_ctx(ctx);
			}
		}
		if (lwpdata == NULL &&
		    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			return (NULL);
		}
	} else {
		stksize = roundup(stksize, PAGESIZE);
		if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
		    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			return (NULL);
		}
	}

	/*
	 * Create a thread, initializing the stack pointer
	 */
	t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);

	t->t_swap = lwpdata;	/* Start of page-able data */
	if (lwp == NULL)
		lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
	bzero(lwp, sizeof (*lwp));
	t->t_lwp = lwp;

	t->t_hold = *smask;
	lwp->lwp_thread = t;
	lwp->lwp_procp = p;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
	if (curlwp != NULL && curlwp->lwp_childstksz != 0)
		lwp->lwp_childstksz = curlwp->lwp_childstksz;

	t->t_stk = lwp_stk_init(lwp, t->t_stk);
	thread_load(t, proc, arg, len);
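
	/*
	 * At this point the thread is fully formed: thread_load() has
	 * arranged for it to begin execution in proc() with the given
	 * argument when it first runs.
	 */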

	/*
	 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
	 */
	if (p->p_rprof_cyclic != CYCLIC_NONE)
		t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);

	if (cid != NOCLASS)
		(void) CL_ALLOC(&bufp, cid, KM_SLEEP);

	/*
	 * Allocate an lwp directory entry for the new lwp.
	 */
	lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);

	mutex_enter(&p->p_lock);
grow:
	/*
	 * Grow the lwp (thread) directory and lwpid hash table if necessary.
	 * A note on the growth algorithm:
	 *	The new lwp directory size is computed as:
	 *		new = 2 * old + 2
	 *	Starting with an initial size of 2 (see exec_common()),
	 *	this yields numbers that are a power of two minus 2:
	 *		2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
	 *	The size of the lwpid hash table must be a power of two
	 *	and must be commensurate in size with the lwp directory
	 *	so that hash bucket chains remain short.  Therefore,
	 *	the lwpid hash table size is computed as:
	 *		hashsz = (dirsz + 2) / 2
	 *	which leads to these hash table sizes corresponding to
	 *	the above directory sizes:
	 *		2, 4, 8, 16, 32, 64, 128, 256, 512, ...
	 * A note on growing the hash table:
	 *	For performance reasons, code in lwp_unpark() does not
	 *	acquire curproc->p_lock when searching the hash table.
	 *	Rather, it calls lwp_hash_lookup_and_lock() which
	 *	acquires only the individual hash bucket lock, taking
	 *	care to deal with reallocation of the hash table
	 *	during the time it takes to acquire the lock.
	 *
	 *	This is sufficient to protect the integrity of the
	 *	hash table, but it requires us to acquire all of the
	 *	old hash bucket locks before growing the hash table
	 *	and to release them afterwards.  It also requires us
	 *	not to free the old hash table because some thread
	 *	in lwp_hash_lookup_and_lock() might still be trying
	 *	to acquire the old bucket lock.
	 *
	 *	So we adopt the tactic of keeping all of the retired
	 *	hash tables on a linked list, so they can be safely
	 *	freed when the process exits or execs.
	 *
	 *	Because the hash table grows in powers of two, the
	 *	total size of all of the hash tables will be slightly
	 *	less than twice the size of the largest hash table.
	 */
	while (p->p_lwpfree == NULL) {
		uint_t dirsz = p->p_lwpdir_sz;
		lwpdir_t *new_dir;
		uint_t new_dirsz;
		lwpdir_t *ldp;
		tidhash_t *new_hash;
		uint_t new_hashsz;

		mutex_exit(&p->p_lock);

		/*
		 * Prepare to remember the old p_tidhash for later
		 * kmem_free()ing when the process exits or execs.
		 */
		if (ret_tidhash == NULL)
			ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
			    KM_SLEEP);
		if (old_dir != NULL)
			kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
		if (old_hash != NULL)
			kmem_free(old_hash, old_hashsz * sizeof (*old_hash));

		new_dirsz = 2 * dirsz + 2;
		new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
		for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
			ldp->ld_next = ldp + 1;
		new_hashsz = (new_dirsz + 2) / 2;
		new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
		    KM_SLEEP);

		mutex_enter(&p->p_lock);
		if (p == curproc)
			prbarrier(p);
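
		/*
		 * Now that we hold p->p_lock again, re-check whether our
		 * newly-allocated directory is still both needed and
		 * correctly sized.
		 */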
		if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
			/*
			 * Someone else beat us to it or some lwp exited.
			 * Set up to free our memory and take a lap.
			 */
			old_dir = new_dir;
			old_dirsz = new_dirsz;
			old_hash = new_hash;
			old_hashsz = new_hashsz;
		} else {
			/*
			 * For the benefit of lwp_hash_lookup_and_lock(),
			 * called from lwp_unpark(), which searches the
			 * tid hash table without acquiring p->p_lock,
			 * we must acquire all of the tid hash table
			 * locks before replacing p->p_tidhash.
			 */
			old_hash = p->p_tidhash;
			old_hashsz = p->p_tidhash_sz;
			for (i = 0; i < old_hashsz; i++) {
				mutex_enter(&old_hash[i].th_lock);
				mutex_enter(&new_hash[i].th_lock);
			}

			/*
			 * We simply hash in all of the old directory entries.
			 * This works because the old directory has no empty
			 * slots and the new hash table starts out empty.
			 * This reproduces the original directory ordering
			 * (required for /proc directory semantics).
			 */
			old_dir = p->p_lwpdir;
			old_dirsz = p->p_lwpdir_sz;
			p->p_lwpdir = new_dir;
			p->p_lwpfree = new_dir;
			p->p_lwpdir_sz = new_dirsz;
			for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
				lwp_hash_in(p, ldp->ld_entry,
				    new_hash, new_hashsz, 0);

			/*
			 * Remember the old hash table along with all
			 * of the previously-remembered hash tables.
			 * We will free them at process exit or exec.
			 */
			ret_tidhash->rth_tidhash = old_hash;
			ret_tidhash->rth_tidhash_sz = old_hashsz;
			ret_tidhash->rth_next = p->p_ret_tidhash;
			p->p_ret_tidhash = ret_tidhash;

			/*
			 * Now establish the new tid hash table.
			 * As soon as we assign p->p_tidhash,
			 * code in lwp_unpark() can start using it.
			 */
			membar_producer();
			p->p_tidhash = new_hash;

			/*
			 * It is necessary that p_tidhash reach global
			 * visibility before p_tidhash_sz.  Otherwise,
			 * code in lwp_hash_lookup_and_lock() could
			 * index into the old p_tidhash using the new
			 * p_tidhash_sz and thereby access invalid data.
			 */
			membar_producer();
			p->p_tidhash_sz = new_hashsz;

			/*
			 * Release the locks; allow lwp_unpark() to carry on.
			 */
			for (i = 0; i < old_hashsz; i++) {
				mutex_exit(&old_hash[i].th_lock);
				mutex_exit(&new_hash[i].th_lock);
			}

			/*
			 * Avoid freeing these objects below.
			 */
			ret_tidhash = NULL;
			old_hash = NULL;
			old_hashsz = 0;
		}
	}

	/*
	 * Block the process against /proc while we manipulate p->p_tlist,
	 * unless lwp_create() was called by /proc for the PCAGENT operation.
	 * We want to do this early enough so that we don't drop p->p_lock
	 * until the thread is put on the p->p_tlist.
	 */
	if (p == curproc) {
		prbarrier(p);
		/*
		 * If the current lwp has been requested to stop, do so now.
		 * Otherwise we have a race condition between /proc attempting
		 * to stop the process and this thread creating a new lwp
		 * that was not seen when the /proc PCSTOP request was issued.
		 * We rely on stop() to call prbarrier(p) before returning.
		 */
		while ((curthread->t_proc_flag & TP_PRSTOP) &&
		    !ttolwp(curthread)->lwp_nostop) {
			/*
			 * We called pool_barrier_enter() before calling
			 * here to lwp_create().  We have to call
			 * pool_barrier_exit() before stopping.
			 */
			pool_barrier_exit();
			prbarrier(p);
			stop(PR_REQUESTED, 0);
			/*
			 * And we have to repeat the call to
			 * pool_barrier_enter() after stopping.
			 */
			pool_barrier_enter();
			prbarrier(p);
		}

		/*
		 * If process is exiting, there could be a race between
		 * the agent lwp creation and the new lwp currently being
		 * created.  So to prevent this race lwp creation is failed
		 * if the process is exiting.
		 */
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			err = 1;
			goto error;
		}

		/*
		 * Since we might have dropped p->p_lock, the
		 * lwp directory free list might have changed.
		 */
		if (p->p_lwpfree == NULL)
			goto grow;
	}

	kpreempt_disable();	/* can't grab cpu_lock here */

	/*
	 * Inherit processor and processor set bindings from curthread.
	 *
	 * For kernel LWPs, we do not inherit processor set bindings at
	 * process creation time (i.e. when p != curproc).  After the
	 * kernel process is created, any subsequent LWPs must be created
	 * by threads in the kernel process, at which point we *will*
	 * inherit processor set bindings.
	 */
	if (CLASS_KERNEL(cid) && p != curproc) {
		t->t_bind_cpu = binding = PBIND_NONE;
		t->t_cpupart = oldpart = &cp_default;
		t->t_bind_pset = PS_NONE;
		t->t_bindflag = (uchar_t)default_binding_mode;
	} else {
		binding = curthread->t_bind_cpu;
		t->t_bind_cpu = binding;
		oldpart = t->t_cpupart;
		t->t_cpupart = curthread->t_cpupart;
		t->t_bind_pset = curthread->t_bind_pset;
		t->t_bindflag = curthread->t_bindflag |
		    (uchar_t)default_binding_mode;
	}

	/*
	 * thread_create() initializes this thread's home lgroup to the root.
	 * Choose a more suitable lgroup, since this thread is associated
	 * with an lwp.
	 */
	ASSERT(oldpart != NULL);
	if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
		t->t_bound_cpu = cpu[binding];
		if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
			lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
	} else if (CLASS_KERNEL(cid)) {
		/*
		 * For kernel threads, assign ourselves to the root lgrp.
		 */
		lgrp_move_thread(t,
		    &curthread->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1);
	} else {
		lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
	}

	kpreempt_enable();

	/*
	 * make sure lpl points to our own partition
	 */
	ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
	ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
	    t->t_cpupart->cp_nlgrploads);

	/*
	 * If we're creating a new process, then inherit the project from our
	 * parent.  If we're only creating an additional lwp then use the
	 * project pointer of the target process.
	 */
	if (p->p_task == NULL)
		newkpj = ttoproj(curthread);
	else
		newkpj = p->p_task->tk_proj;

	/*
	 * It is safe to point the thread to the new project without holding it
	 * since we're holding the target process' p_lock here and therefore
	 * we're guaranteed that it will not move to another project.
	 */
	oldkpj = ttoproj(t);
	if (newkpj != oldkpj) {
		t->t_proj = newkpj;
		(void) project_hold(newkpj);
		project_rele(oldkpj);
	}
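
	/*
	 * bufp, preallocated with CL_ALLOC() above, carries the
	 * class-specific data consumed by CL_ENTERCLASS() or CL_FORK()
	 * below; on the error path it is released with CL_FREE().
	 */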

	if (cid != NOCLASS) {
		/*
		 * If the lwp is being created in the current process
		 * and matches the current thread's scheduling class,
		 * we should propagate the current thread's scheduling
		 * parameters by calling CL_FORK.  Otherwise just use
		 * the defaults by calling CL_ENTERCLASS.
		 */
		if (p != curproc || curthread->t_cid != cid) {
			err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
			t->t_pri = pri;	/* CL_ENTERCLASS may have changed it */
			/*
			 * We don't call schedctl_set_cidpri(t) here
			 * because the schedctl data is not yet set
			 * up for the newly-created lwp.
			 */
		} else {
			t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
			err = CL_FORK(curthread, t, bufp);
			t->t_cid = cid;
		}
		if (err)
			goto error;
		else
			bufp = NULL;
	}

	/*
	 * If we were given an lwpid then use it, else allocate one.
	 */
	if (lwpid != 0)
		t->t_tid = lwpid;
	else {
		/*
		 * lwp/thread id 0 is never valid; reserved for special checks.
		 * lwp/thread id 1 is reserved for the main thread.
		 * Start again at 2 when INT_MAX has been reached
		 * (id_t is a signed 32-bit integer).
		 */
		id_t prev_id = p->p_lwpid;	/* last allocated tid */

		do {	/* avoid lwpid duplication */
			if (p->p_lwpid == INT_MAX) {
				p->p_flag |= SLWPWRAP;
				p->p_lwpid = 1;
			}
			if ((t->t_tid = ++p->p_lwpid) == prev_id) {
				/*
				 * All lwpids are allocated; fail the request.
				 */
				err = 1;
				goto error;
			}
			/*
			 * We only need to worry about colliding with an id
			 * that's already in use if this process has
			 * cycled through all available lwp ids.
			 */
			if ((p->p_flag & SLWPWRAP) == 0)
				break;
		} while (lwp_hash_lookup(p, t->t_tid) != NULL);
	}

	/*
	 * If this is a branded process, let the brand do any necessary lwp
	 * initialization.
	 */
	if (PROC_IS_BRANDED(p)) {
		if (BROP(p)->b_initlwp(lwp)) {
			err = 1;
			goto error;
		}
		branded = 1;
	}

	if (t->t_tid == 1) {
		kpreempt_disable();
		ASSERT(t->t_lpl != NULL);
		p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
		kpreempt_enable();
		if (p->p_tr_lgrpid != LGRP_NONE &&
		    p->p_tr_lgrpid != p->p_t1_lgrpid) {
			lgrp_update_trthr_migrations(1);
		}
	}

	p->p_lwpcnt++;
	t->t_waitfor = -1;

	/*
	 * Turn microstate accounting on for thread if on for process.
	 */
	if (p->p_flag & SMSACCT)
		t->t_proc_flag |= TP_MSACCT;

	/*
	 * If the process has watchpoints, mark the new thread as such.
	 */
	if (pr_watch_active(p))
		watch_enable(t);

	/*
	 * The lwp is being created in the stopped state.
	 * We set all the necessary flags to indicate that fact here.
	 * We omit the TS_CREATE flag from t_schedflag so that the lwp
	 * cannot be set running until the caller is finished with it,
	 * even if lwp_continue() is called on it after we drop p->p_lock.
	 * When the caller is finished with the newly-created lwp,
	 * the caller must call lwp_create_done() to allow the lwp
	 * to be set running.  If the TP_HOLDLWP flag is left set, the
	 * lwp will suspend itself after reaching system call exit.
	 */
	init_mstate(t, LMS_STOPPED);
	t->t_proc_flag |= TP_HOLDLWP;
	t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
	t->t_whystop = PR_SUSPENDED;
	t->t_whatstop = SUSPEND_NORMAL;
	t->t_sig_check = 1;	/* ensure that TP_HOLDLWP is honored */

	/*
	 * Set system call processing flags in case tracing or profiling
	 * is set.  The first system call will evaluate these and turn
	 * them off if they aren't needed.
	 */
	t->t_pre_sys = 1;
	t->t_post_sys = 1;

	/*
	 * Insert the new thread into the list of all threads.
	 */
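	/*
	 * p_tlist is a circular, doubly-linked list threaded through
	 * t_forw and t_back; a NULL p_tlist means the list is empty.
	 */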
	if ((tx = p->p_tlist) == NULL) {
		t->t_back = t;
		t->t_forw = t;
		p->p_tlist = t;
	} else {
		t->t_forw = tx;
		t->t_back = tx->t_back;
		tx->t_back->t_forw = t;
		tx->t_back = t;
	}

	/*
	 * Insert the new lwp into an lwp directory slot position
	 * and into the lwpid hash table.
	 */
	lep->le_thread = t;
	lep->le_lwpid = t->t_tid;
	lep->le_start = t->t_start;
	lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);

	if (state == TS_RUN) {
		/*
		 * We set the new lwp running immediately.
		 */
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
	}

error:
	if (err) {
		if (CLASS_KERNEL(cid)) {
			/*
			 * This should only happen if a system process runs
			 * out of lwpids, which shouldn't occur.
			 */
			panic("Failed to create a system LWP");
		}
		/*
		 * We have failed to create an lwp, so decrement the number
		 * of lwps in the task and let the lgroup load averages know
		 * that this thread isn't going to show up.
		 */
		kpreempt_disable();
		lgrp_move_thread(t, NULL, 1);
		kpreempt_enable();

		ASSERT(MUTEX_HELD(&p->p_lock));
		mutex_enter(&p->p_zone->zone_nlwps_lock);
		p->p_task->tk_nlwps--;
		p->p_task->tk_proj->kpj_nlwps--;
		p->p_zone->zone_nlwps--;
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		if (cid != NOCLASS && bufp != NULL)
			CL_FREE(cid, bufp);

		if (branded)
			BROP(p)->b_freelwp(lwp);

		mutex_exit(&p->p_lock);
		t->t_state = TS_FREE;
		thread_rele(t);

		/*
		 * We need to remove t from the list of all threads
		 * because thread_exit()/lwp_exit() isn't called on t.
		 */
		mutex_enter(&pidlock);
		ASSERT(t != t->t_next);		/* t0 never exits */
		t->t_next->t_prev = t->t_prev;
		t->t_prev->t_next = t->t_next;
		mutex_exit(&pidlock);

		thread_free(t);
		kmem_free(lep, sizeof (*lep));
		lwp = NULL;
	} else {
		mutex_exit(&p->p_lock);
	}

	if (old_dir != NULL)
		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
	if (old_hash != NULL)
		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
	if (ret_tidhash != NULL)
		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));

	DTRACE_PROC1(lwp__create, kthread_t *, t);
	return (lwp);
}

/*
 * lwp_create_done() is called by the caller of lwp_create() to set the
 * newly-created lwp running after the caller has finished manipulating it.
 */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Copy an LWP's active templates, and clear the latest contracts.
 */
void
lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
{
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
		dst->lwp_ct_latest[i] = NULL;
	}
}

/*
 * Clear an LWP's contract template state.
 */
void
lwp_ctmpl_clear(klwp_t *lwp)
{
	ct_template_t *tmpl;
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
			ctmpl_free(tmpl);
			lwp->lwp_ct_active[i] = NULL;
		}

		if (lwp->lwp_ct_latest[i] != NULL) {
			contract_rele(lwp->lwp_ct_latest[i]);
			lwp->lwp_ct_latest[i] = NULL;
		}
	}
}

/*
 * Individual lwp exit.
 * If this is the last lwp, exit the whole process.
 */
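/*
 * Called with p->p_lock held.  lwp_exit() never returns; the exiting
 * thread is finally discarded via swtch_from_zombie().
 */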
void
lwp_exit(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_exit(&p->p_lock);

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully abandoned.
	 */
	trash_user_windows();
#endif

	tsd_exit();		/* free thread specific data */

	kcpc_passivate();	/* Clean up performance counter state */

	pollcleanup();

	if (t->t_door)
		door_slam();

	if (t->t_schedctl != NULL)
		schedctl_lwp_cleanup(t);

	if (t->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Perform any brand specific exit processing, then release any
	 * brand data associated with the lwp
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwpexit(lwp);

	mutex_enter(&p->p_lock);
	lwp_cleanup();

	/*
	 * When this process is dumping core, its lwps are held here
	 * until the core dump is finished.  Then exitlwps() is called
	 * again to release these lwps so that they can finish exiting.
	 */
	if (p->p_flag & SCOREDUMP)
		stop(PR_SUSPENDED, SUSPEND_NORMAL);

	/*
	 * Block the process against /proc now that we have really acquired
	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
	 */
	prbarrier(p);

	/*
	 * Call proc_exit() if this is the last non-daemon lwp in the process.
	 */
	if (!(t->t_proc_flag & TP_DAEMON) &&
	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
		mutex_exit(&p->p_lock);
		if (proc_exit(CLD_EXITED, 0) == 0) {
			/* Restarting init. */
			return;
		}

		/*
		 * proc_exit() returns a non-zero value when some other
		 * lwp got there first.  We just have to continue in
		 * lwp_exit().
		 */
		mutex_enter(&p->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
		prbarrier(p);
	}

	DTRACE_PROC(lwp__exit);

	/*
	 * If the lwp is a detached lwp or if the process is exiting,
	 * remove (lwp_hash_out()) the lwp from the lwp directory.
	 * Otherwise null out the lwp's le_thread pointer in the lwp
	 * directory so that other threads will see it as a zombie lwp.
	 */
	prlwpexit(t);		/* notify /proc */
	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
		lwp_hash_out(p, t->t_tid);
	else {
		ASSERT(!(t->t_proc_flag & TP_DAEMON));
		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
		p->p_zombcnt++;
		cv_broadcast(&p->p_lwpexit);
	}
	if (t->t_proc_flag & TP_DAEMON) {
		p->p_lwpdaemon--;
		t->t_proc_flag &= ~TP_DAEMON;
	}
	t->t_proc_flag &= ~TP_TWAIT;

	/*
	 * Maintain accurate lwp count for task.max-lwps resource control.
	 */
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	p->p_task->tk_nlwps--;
	p->p_task->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	CL_EXIT(t);		/* tell the scheduler that t is exiting */
	ASSERT(p->p_lwpcnt != 0);
	p->p_lwpcnt--;

	/*
	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
	 * wake them up so someone can return EDEADLK.
	 * (See the block comment preceding lwp_wait().)
	 */
	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
		cv_broadcast(&p->p_lwpexit);

	t->t_proc_flag |= TP_LWPEXIT;
	term_mstate(t);

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	t->t_forw->t_back = t->t_back;
	t->t_back->t_forw = t->t_forw;
	if (t == p->p_tlist)
		p->p_tlist = t->t_forw;

	/*
	 * Clean up the signal state.
	 */
	if (t->t_sigqueue != NULL)
		sigdelq(p, t, 0);
	if (lwp->lwp_curinfo != NULL) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	thread_rele(t);

	/*
	 * Terminated lwps are associated with process zero and are put onto
	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
	 */
	t->t_preempt++;

	if (t->t_ctx != NULL)
		exitctx(t);
	if (p->p_pctx != NULL)
		exitpctx(p);

	t->t_procp = &p0;

	/*
	 * Notify the HAT about the change of address space
	 */
	hat_thread_exit(t);
	/*
	 * When this is the last running lwp in this process and some lwp is
	 * waiting for this condition to become true, or this thread was being
	 * suspended, then the waiting lwp is awakened.
	 *
	 * Also, if the process is exiting, we may have a thread waiting in
	 * exitlwps() that needs to be notified.
	 */
	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
	    (p->p_flag & SEXITLWPS))
		cv_broadcast(&p->p_holdlwps);

	/*
	 * Need to drop p_lock so we can reacquire pidlock.
	 */
	mutex_exit(&p->p_lock);
	mutex_enter(&pidlock);

	ASSERT(t != t->t_next);		/* t0 never exits */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	lwp_pcb_exit();

	t->t_state = TS_ZOMB;
	swtch_from_zombie();
	/* never returns */
}


/*
 * Cleanup function for an exiting lwp.
 * Called both from lwp_exit() and from proc_exit().
 * p->p_lock is repeatedly released and grabbed in this function.
 */
void
lwp_cleanup(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* untimeout any lwp-bound realtime timers */
	if (p->p_itimer != NULL)
		timer_lwpexit();

	/*
	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
	 * so it appears that the agent never existed, and clear p_agenttp.
	 */
	if (t == p->p_agenttp) {
		ASSERT(t->t_tid == p->p_lwpid);
		p->p_lwpid--;
		p->p_agenttp = NULL;
	}

	/*
	 * Do lgroup bookkeeping to account for thread exiting.
	 */
	kpreempt_disable();
	lgrp_move_thread(t, NULL, 1);
	if (t->t_tid == 1) {
		p->p_t1_lgrpid = LGRP_NONE;
	}
	kpreempt_enable();

	lwp_ctmpl_clear(ttolwp(t));
}

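/*
 * Suspend the indicated lwp.  Returns 0 on success, EINTR if our wait
 * for the lwp to stop is interrupted by a signal, or ESRCH if the lwp
 * exits while we are waiting.  Called with p->p_lock held.
 */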
int
lwp_suspend(kthread_t *t)
{
	int tid;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
	 * If an lwp is stopping itself, there is no need to wait.
	 */
top:
	t->t_proc_flag |= TP_HOLDLWP;
	if (t == curthread) {
		t->t_sig_check = 1;
	} else {
		/*
		 * Make sure the lwp stops promptly.
		 */
		thread_lock(t);
		t->t_sig_check = 1;
		/*
		 * XXX Should use virtual stop like /proc does instead of
		 * XXX waking the thread to get it to stop.
		 */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
			poke_cpu(t->t_cpu->cpu_id);
		}

		tid = t->t_tid;		/* remember thread ID */
		/*
		 * Wait for lwp to stop
		 */
		while (!SUSPENDED(t)) {
			/*
			 * Drop the thread lock before waiting and reacquire it
			 * afterwards, so the thread can change its t_state
			 * field.
			 */
			thread_unlock(t);

			/*
			 * Check if aborted by exitlwps().
			 */
			if (p->p_flag & SEXITLWPS)
				lwp_exit();

			/*
			 * Cooperate with jobcontrol signals and /proc stopping
			 * by calling cv_wait_sig() to wait for the target
			 * lwp to stop.  Just using cv_wait() can lead to
			 * deadlock because, if some other lwp has stopped
			 * by either of these mechanisms, then p_lwprcnt will
			 * never become zero if we do a cv_wait().
			 */
			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
				return (EINTR);

			/*
			 * Check to see if thread died while we were
			 * waiting for it to suspend.
			 */
			if (idtot(p, tid) == NULL)
				return (ESRCH);

			thread_lock(t);
			/*
			 * If the TP_HOLDLWP flag went away, lwp_continue()
			 * or vfork() must have been called while we were
			 * waiting, so start over again.
			 */
			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
				thread_unlock(t);
				goto top;
			}
		}
		thread_unlock(t);
	}
	return (0);
}

/*
 * continue a lwp that's been stopped by lwp_suspend().
 */
void
lwp_continue(kthread_t *t)
{
	proc_t *p = ttoproc(t);
	int was_suspended = t->t_proc_flag & TP_HOLDLWP;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t->t_proc_flag &= ~TP_HOLDLWP;
	thread_lock(t);
	if (SUSPENDED(t) &&
	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
		p->p_lwprcnt++;
		t->t_schedflag |= TS_CSTART;
		setrun_locked(t);
	}
	thread_unlock(t);
	/*
	 * Wake up anyone waiting for this thread to be suspended
	 */
	if (was_suspended)
		cv_broadcast(&p->p_holdlwps);
}

/*
 * ********************************
 *  Miscellaneous lwp routines	  *
 * ********************************
 */
/*
 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
 * This will cause the process's lwps to stop at a hold point.  A hold
 * point is where a kernel thread has a flat stack.  This is at the
 * return from a system call and at the return from a user level trap.
 *
 * When a process is undergoing a fork1() or vfork(), its p_flag is set to
 * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
 * hold point.  The lwps in the process are not being cloned, so they
 * are held at the usual hold points and also within issig_forreal().
 * This has the side-effect that their system calls do not return
 * showing EINTR.
 *
 * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
 * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
 * lwp is waiting for the target lwp to be stopped.
 */
void
holdlwp(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;

	mutex_enter(&p->p_lock);
	/*
	 * Don't terminate immediately if the process is dumping core.
	 * Once the process has dumped core, all lwps are terminated.
	 */
	if (!(p->p_flag & SCOREDUMP)) {
		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
			lwp_exit();
	}
	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
		mutex_exit(&p->p_lock);
		return;
	}
	/*
	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
	 * when p->p_lwprcnt becomes zero.
	 */
	stop(PR_SUSPENDED, SUSPEND_NORMAL);
	if (p->p_flag & SEXITLWPS)
		lwp_exit();
	mutex_exit(&p->p_lock);
}

/*
 * Have all lwps within the process hold at a point where they are
 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
 */
int
holdlwps(int holdflag)
{
	proc_t *p = curproc;

	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
again:
	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		/*
		 * If another lwp is doing a forkall() or proc_exit(), bail out.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDFORK)) {
			mutex_exit(&p->p_lock);
			return (0);
		}
		/*
		 * Another lwp is doing a fork1() or is undergoing
		 * watchpoint activity.  We hold here for it to complete.
		 */
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
	}
	p->p_flag |= holdflag;
	pokelwps(p);
	--p->p_lwprcnt;
	/*
	 * Wait for the process to become quiescent (p->p_lwprcnt == 0).
	 */
	while (p->p_lwprcnt > 0) {
		/*
		 * Check if aborted by exitlwps().
		 * Also check if SHOLDWATCH is set; it takes precedence.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) {
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			cv_broadcast(&p->p_holdlwps);
			goto again;
		}
		/*
		 * Cooperate with jobcontrol signals and /proc stopping.
		 * If some other lwp has stopped by either of these
		 * mechanisms, then p_lwprcnt will never become zero
		 * and the process will appear deadlocked unless we
		 * stop here in sympathy with the other lwp before
		 * doing the cv_wait() below.
		 *
		 * If the other lwp stops after we do the cv_wait(), it
		 * will wake us up to loop around and do the sympathy stop.
		 *
		 * Since stop() drops p->p_lock, we must start from
		 * the top again on returning from stop().
		 */
		if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) {
			int whystop = p->p_stopsig? PR_JOBCONTROL :
			    PR_REQUESTED;
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			stop(whystop, p->p_stopsig);
			goto again;
		}
		cv_wait(&p->p_holdlwps, &p->p_lock);
	}
	p->p_lwprcnt++;
	p->p_flag &= ~holdflag;
	mutex_exit(&p->p_lock);
	return (1);
}

/*
 * See comments for holdwatch(), below.
 */
static int
holdcheck(int clearflags)
{
	proc_t *p = curproc;

	/*
	 * If we are trying to exit, that takes precedence over anything else.
	 */
	if (p->p_flag & SEXITLWPS) {
		p->p_lwprcnt++;
		p->p_flag &= ~clearflags;
		lwp_exit();
	}

	/*
	 * If another thread is calling fork1(), stop the current thread so the
	 * other can complete.
	 */
	if (p->p_flag & SHOLDFORK1) {
		p->p_lwprcnt++;
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
		if (p->p_flag & SEXITLWPS) {
			p->p_flag &= ~clearflags;
			lwp_exit();
		}
		return (-1);
	}

	/*
	 * If another thread is calling fork(), then indicate we are doing
	 * watchpoint activity.  This will cause holdlwps() above to stop the
	 * forking thread, at which point we can continue with watchpoint
	 * activity.
	 */
	if (p->p_flag & SHOLDFORK) {
		p->p_lwprcnt++;
		while (p->p_flag & SHOLDFORK) {
			p->p_flag |= SHOLDWATCH;
			cv_broadcast(&p->p_holdlwps);
			cv_wait(&p->p_holdlwps, &p->p_lock);
			p->p_flag &= ~SHOLDWATCH;
		}
		return (-1);
	}

	return (0);
}

/*
 * Stop all lwps within the process, holding themselves in the kernel while the
 * active lwp undergoes watchpoint activity.  This is more complicated than
 * expected because stop() relies on calling holdwatch() in order to copyin data
 * from the user's address space.  A double barrier is used to prevent an
 * infinite loop.
 *
 *	o The first thread into holdwatch() is the 'master' thread and does
 *	  the following:
 *
 *		- Sets SHOLDWATCH on the current process
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for all threads to be either stopped or have
 *		  TP_WATCHSTOP set.
 *		- Sets the SWATCHOK flag on the process
 *		- Unsets TP_WATCHSTOP
 *		- Waits for the other threads to completely stop
 *		- Unsets SWATCHOK
 *
 *	o If SHOLDWATCH is already set when we enter this function, then
 *	  another thread is already trying to stop this thread.  This 'slave'
 *	  thread does the following:
 *
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for SWATCHOK flag to be set
 *		- Calls stop()
 *
 *	o If SWATCHOK is set on the process, then this function immediately
 *	  returns, as we must have been called via stop().
 *
 * In addition, there are other flags that take precedence over SHOLDWATCH:
 *
 *	o If SEXITLWPS is set, exit immediately.
 *
 *	o If SHOLDFORK1 is set, wait for fork1() to complete.
 *
 *	o If SHOLDFORK is set, then watchpoint activity takes precedence.  In
 *	  this case, set SHOLDWATCH, signalling the forking thread to stop
 *	  first.
 *
 *	o If the process is being stopped via /proc (TP_PRSTOP is set), then
 *	  we stop the current thread.
 *
 * Returns 0 if all threads have been quiesced.  Returns non-zero if not all
 * threads were stopped, or the list of watched pages has changed.
 */
int
holdwatch(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;
	int ret = 0;

	mutex_enter(&p->p_lock);

	p->p_lwprcnt--;

	/*
	 * Check for bail-out conditions as outlined above.
	 */
	if (holdcheck(0) != 0) {
		mutex_exit(&p->p_lock);
		return (-1);
	}

	if (!(p->p_flag & SHOLDWATCH)) {
		/*
		 * We are the master watchpoint thread.  Set SHOLDWATCH and
		 * poke the other threads.
		 */
		p->p_flag |= SHOLDWATCH;
		pokelwps(p);

		/*
		 * Wait for all threads to be stopped or have TP_WATCHSTOP set.
		 */
		while (pr_allstopped(p, 1) > 0) {
			if (holdcheck(SHOLDWATCH) != 0) {
				p->p_flag &= ~SHOLDWATCH;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now stopped or in the process of stopping.
		 * Set SWATCHOK and let them stop completely.
		 */
		p->p_flag |= SWATCHOK;
		t->t_proc_flag &= ~TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (pr_allstopped(p, 0) > 0) {
			/*
			 * At first glance, it may appear that we don't need a
			 * call to holdcheck() here.  But if the process gets a
			 * SIGKILL signal, one of our stopped threads may have
			 * been awakened and is waiting in exitlwps(), which
			 * takes precedence over watchpoints.
			 */
			if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) {
				p->p_flag &= ~(SHOLDWATCH | SWATCHOK);
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now completely stopped.
		 */
		p->p_flag &= ~SWATCHOK;
		p->p_flag &= ~SHOLDWATCH;
		p->p_lwprcnt++;

	} else if (!(p->p_flag & SWATCHOK)) {

		/*
		 * SHOLDWATCH is set, so another thread is trying to do
		 * watchpoint activity.  Indicate this thread is stopping, and
		 * wait for the OK from the master thread.
		 */
		t->t_proc_flag |= TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (!(p->p_flag & SWATCHOK)) {
			if (holdcheck(0) != 0) {
				t->t_proc_flag &= ~TP_WATCHSTOP;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * Once the master thread has given the OK, this thread can
		 * actually call stop().
		 */
		t->t_proc_flag &= ~TP_WATCHSTOP;
		p->p_lwprcnt++;

		stop(PR_SUSPENDED, SUSPEND_NORMAL);

		/*
		 * It is not OK to do watchpoint activity; notify the caller
		 * to retry.
		 */
		ret = -1;

	} else {

		/*
		 * The only way we can hit the case where SHOLDWATCH is set and
		 * SWATCHOK is set is if we are triggering this from within a
		 * stop() call.  Assert that this is the case.
		 */

		ASSERT(t->t_proc_flag & TP_STOPPING);
		p->p_lwprcnt++;
	}

	mutex_exit(&p->p_lock);

	return (ret);
}

/*
 * force all interruptible lwps to trap into the kernel.
 */
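/*
 * Callers hold p->p_lock.  Each thread is aston()ed so that it will
 * pass through trap() or post_syscall() and notice any hold flags the
 * caller has set.
 */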
void
pokelwps(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_tlist;
	do {
		if (t == curthread)
			continue;
		thread_lock(t);
		aston(t);	/* make thread trap or do post_syscall */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_STOPPED) {
			/*
			 * Ensure that proc_exit() is not blocked by lwps
			 * that were stopped via jobcontrol or /proc.
			 */
			if (p->p_flag & SEXITLWPS) {
				p->p_stopsig = 0;
				t->t_schedflag |= (TS_XSTART | TS_PSTART);
				setrun_locked(t);
			}
			/*
			 * If we are holding lwps for a forkall(),
			 * force lwps that have been suspended via
			 * lwp_suspend() and are suspended inside
			 * of a system call to proceed to their
			 * holdlwp() points where they are clonable.
			 */
			if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) {
				if ((t->t_schedflag & TS_CSTART) == 0) {
					p->p_lwprcnt++;
					t->t_schedflag |= TS_CSTART;
					setrun_locked(t);
				}
			}
		} else if (t->t_state == TS_ONPROC) {
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * undo the effects of holdlwps() or holdwatch().
 */
void
continuelwps(proc_t *p)
{
	kthread_t *t;

	/*
	 * If this flag is set, then the original holdwatch() didn't actually
	 * stop the process.  See comments for holdwatch().
	 */
	if (p->p_flag & SWATCHOK) {
		ASSERT(curthread->t_proc_flag & TP_STOPPING);
		return;
	}

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0);

	t = p->p_tlist;
	do {
		thread_lock(t);		/* SUSPENDED looks at t_schedflag */
		if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) {
			p->p_lwprcnt++;
			t->t_schedflag |= TS_CSTART;
			setrun_locked(t);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * Force all LWPs in the current process other than the caller to exit,
 * and then cv_wait() on p_holdlwps for them to exit.  The exitlwps()
 * function is typically used in these situations:
 *
 *   (a) prior to an exec() system call
 *   (b) prior to dumping a core file
 *   (c) prior to a uadmin() shutdown
 *
 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed.
 * Multiple threads in the process can call this function at one time by
 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used
 * to declare one particular thread the winner who gets to kill the others.
 * If a thread wins the exitlwps() dance, zero is returned; otherwise an
 * appropriate errno value is returned to caller for its system call to return.
 */
int
exitlwps(int coredump)
{
	proc_t *p = curproc;
	int heldcnt;

	if (curthread->t_door)
		door_slam();
	if (p->p_door_list)
		door_revoke_all();
	if (curthread->t_schedctl != NULL)
		schedctl_lwp_cleanup(curthread);

	/*
	 * Ensure that before starting to wait for other lwps to exit,
	 * cleanup all upimutexes held by curthread.  Otherwise, some other
	 * lwp could be waiting (uninterruptibly) for a upimutex held by
	 * curthread, and the call to pokelwps() below would deadlock.
	 * Even if a blocked upimutex_lock is made interruptible,
	 * curthread's upimutexes need to be unlocked: do it here.
	 */
	if (curthread->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Grab p_lock in order to check and set SEXITLWPS to declare a winner.
	 * We must also block any further /proc access from this point forward.
	 */
	mutex_enter(&p->p_lock);
	prbarrier(p);

	if (p->p_flag & SEXITLWPS) {
		mutex_exit(&p->p_lock);
		aston(curthread);	/* force a trip through post_syscall */
		return (set_errno(EINTR));
	}

	p->p_flag |= SEXITLWPS;
	if (coredump)		/* tell other lwps to stop, not exit */
		p->p_flag |= SCOREDUMP;

	/*
	 * Give precedence to exitlwps() if a holdlwps() is
	 * in progress.  The lwp doing the holdlwps() operation
	 * is aborted when it is awakened.
	 */
	while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		cv_broadcast(&p->p_holdlwps);
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_flag |= SHOLDFORK;
	pokelwps(p);

	/*
	 * Wait for process to become quiescent.
	 */
	--p->p_lwprcnt;
	while (p->p_lwprcnt > 0) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_lwprcnt++;
	ASSERT(p->p_lwprcnt == 1);

	/*
	 * The SCOREDUMP flag puts the process into a quiescent
	 * state.  The process's lwps remain attached to this
	 * process until exitlwps() is called again without the
	 * 'coredump' flag set, then the lwps are terminated
	 * and the process can exit.
	 */
	if (coredump) {
		p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS);
		goto out;
	}

	/*
	 * Determine if there are any lwps left dangling in
	 * the stopped state.  This happens when exitlwps()
	 * aborts a holdlwps() operation.
	 */
	p->p_flag &= ~SHOLDFORK;
	if ((heldcnt = p->p_lwpcnt) > 1) {
		kthread_t *t;
		for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) {
			t->t_proc_flag &= ~TP_TWAIT;
			lwp_continue(t);
		}
	}

	/*
	 * Wait for all other lwps to exit.
	 */
	--p->p_lwprcnt;
	while (p->p_lwpcnt > 1) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	++p->p_lwprcnt;
	ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1);

	p->p_flag &= ~SEXITLWPS;
	curthread->t_proc_flag &= ~TP_TWAIT;

out:
	if (!coredump && p->p_zombcnt) {	/* cleanup the zombie lwps */
		lwpdir_t *ldp;
		lwpent_t *lep;
		int i;

		for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) {
			lep = ldp->ld_entry;
			if (lep != NULL && lep->le_thread != curthread) {
				ASSERT(lep->le_thread == NULL);
				p->p_zombcnt--;
				lwp_hash_out(p, lep->le_lwpid);
			}
		}
		ASSERT(p->p_zombcnt == 0);
	}

	/*
	 * If some other LWP in the process wanted us to suspend ourself,
	 * then we will not do it.  The other LWP is now terminated and
	 * no one will ever continue us again if we suspend ourself.
	 */
	curthread->t_proc_flag &= ~TP_HOLDLWP;
	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * duplicate a lwp.
 */
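/*
 * forklwp() returns NULL only if lwp_create() fails.  The child lwp is
 * created TS_STOPPED at the parent's priority; its scheduling class is
 * attached near the bottom of this function, after the class-specific
 * data has been preallocated.
 */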
klwp_t *
forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
{
	klwp_t *clwp;
	void *tregs, *tfpu;
	kthread_t *t = lwptot(lwp);
	kthread_t *ct;
	proc_t *p = lwptoproc(lwp);
	int cid;
	void *bufp;
	void *brand_data;
	int val;

	ASSERT(p == curproc);
	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));

#if defined(__sparc)
	if (t == curthread)
		(void) flush_user_windows_to_stack(NULL);
#endif

	if (t == curthread)
		/* copy args out of registers first */
		(void) save_syscall_args();

	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
	if (clwp == NULL)
		return (NULL);

	/*
	 * most of the parent's lwp can be copied to its duplicate,
	 * except for the fields that are unique to each lwp, like
	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
	 */
	ct = clwp->lwp_thread;
	tregs = clwp->lwp_regs;
	tfpu = clwp->lwp_fpu;
	brand_data = clwp->lwp_brand;

	/*
	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
	 * mstate_aggr_state() from reading stale mstate entries copied
	 * from lwp to clwp.
	 */
	mutex_enter(&cp->p_lock);
	*clwp = *lwp;

	/* clear microstate and resource usage data in new lwp */
	init_mstate(ct, LMS_STOPPED);
	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
	mutex_exit(&cp->p_lock);

	/* fix up child's lwp */

	clwp->lwp_pcb.pcb_flags = 0;
#if defined(__sparc)
	clwp->lwp_pcb.pcb_step = STEP_NONE;
#endif
	clwp->lwp_cursig = 0;
	clwp->lwp_extsig = 0;
	clwp->lwp_curinfo = (struct sigqueue *)0;
	clwp->lwp_thread = ct;
	ct->t_sysnum = t->t_sysnum;
	clwp->lwp_regs = tregs;
	clwp->lwp_fpu = tfpu;
	clwp->lwp_brand = brand_data;
	clwp->lwp_ap = clwp->lwp_arg;
	clwp->lwp_procp = cp;
	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
	clwp->lwp_lastfault = 0;
	clwp->lwp_lastfaddr = 0;

	/* copy parent's struct regs to child. */
	lwp_forkregs(lwp, clwp);

	/*
	 * Fork thread context ops, if any.
	 */
	if (t->t_ctx)
		forkctx(t, ct);

	/* fix door state in the child */
	if (t->t_door)
		door_fork(t, ct);

	/* copy current contract templates, clear latest contracts */
	lwp_ctmpl_copy(clwp, lwp);

	mutex_enter(&cp->p_lock);
	/* lwp_create() set the TP_HOLDLWP flag */
	if (!(t->t_proc_flag & TP_HOLDLWP))
		ct->t_proc_flag &= ~TP_HOLDLWP;
	if (cp->p_flag & SMSACCT)
		ct->t_proc_flag |= TP_MSACCT;
	mutex_exit(&cp->p_lock);

	/* Allow brand to propagate brand-specific state */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_forklwp(lwp, clwp);

retry:
	cid = t->t_cid;

	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
	ASSERT(val == 0);

	mutex_enter(&p->p_lock);
	if (cid != t->t_cid) {
		/*
		 * Someone just changed this thread's scheduling class,
		 * so try pre-allocating the buffer again.  Hopefully we
		 * don't hit this often.
		 */
		mutex_exit(&p->p_lock);
		CL_FREE(cid, bufp);
		goto retry;
	}

	ct->t_unpark = t->t_unpark;
	ct->t_clfuncs = t->t_clfuncs;
	CL_FORK(t, ct, bufp);
	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
	mutex_exit(&p->p_lock);

	return (clwp);
}

/*
 * Add a new lwp entry to the lwp directory and to the lwpid hash table.
 */
void
lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
    int do_lock)
{
	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	kthread_t *t;

	/*
	 * Allocate a directory element from the free list.
	 * Code elsewhere guarantees a free slot.
	 */
	ldp = p->p_lwpfree;
	p->p_lwpfree = ldp->ld_next;
	ASSERT(ldp->ld_entry == NULL);
	ldp->ld_entry = lep;

	if (do_lock)
		mutex_enter(&thp->th_lock);

	/*
	 * Insert it into the lwpid hash table.
	 */
	ldpp = &thp->th_list;
	ldp->ld_next = *ldpp;
	*ldpp = ldp;

	/*
	 * Set the active thread's directory slot entry.
	 */
	if ((t = lep->le_thread) != NULL) {
		ASSERT(lep->le_lwpid == t->t_tid);
		t->t_dslot = (int)(ldp - p->p_lwpdir);
	}

	if (do_lock)
		mutex_exit(&thp->th_lock);
}

/*
 * Remove an lwp from the lwpid hash table and free its directory entry.
 * This is done when a detached lwp exits in lwp_exit() or
 * when a non-detached lwp is waited for in lwp_wait() or
 * when a zombie lwp is detached in lwp_detach().
 */
void
lwp_hash_out(proc_t *p, id_t lwpid)
{
	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	lwpent_t *lep;

	mutex_enter(&thp->th_lock);
	for (ldpp = &thp->th_list;
	    (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) {
		lep = ldp->ld_entry;
		if (lep->le_lwpid == lwpid) {
			prlwpfree(p, lep);	/* /proc deals with le_trace */
			*ldpp = ldp->ld_next;
			ldp->ld_entry = NULL;
			ldp->ld_next = p->p_lwpfree;
			p->p_lwpfree = ldp;
			kmem_free(lep, sizeof (*lep));
			break;
		}
	}
	mutex_exit(&thp->th_lock);
}

/*
 * Lookup an lwp in the lwpid hash table by lwpid.
 */
lwpdir_t *
lwp_hash_lookup(proc_t *p, id_t lwpid)
{
	tidhash_t *thp;
	lwpdir_t *ldp;

	/*
	 * The process may be exiting, after p_tidhash has been set to NULL in
	 * proc_exit() but before prfree() has been called.  Return failure in
	 * this case.
	 */
	if (p->p_tidhash == NULL)
		return (NULL);

	thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid)
			return (ldp);
	}

	return (NULL);
}

/*
 * Same as lwp_hash_lookup(), but acquire and return
 * the tid hash table entry lock on success.
 */
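/*
 * The membar_consumer() below pairs with the membar_producer() calls in
 * lwp_create(), which order the stores to p_tidhash and p_tidhash_sz
 * when the table is grown, so we can never index the old hash table
 * with the new size.
 */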
lwpdir_t *
lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	tidhash_t *tidhash;
	uint_t tidhash_sz;
	tidhash_t *thp;
	lwpdir_t *ldp;

top:
	tidhash_sz = p->p_tidhash_sz;
	membar_consumer();
	if ((tidhash = p->p_tidhash) == NULL)
		return (NULL);

	thp = &tidhash[TIDHASH(lwpid, tidhash_sz)];
	mutex_enter(&thp->th_lock);

	/*
	 * Since we are not holding p->p_lock, the tid hash table
	 * may have changed.  If so, start over.  If not, then
	 * it cannot change until after we drop &thp->th_lock.
	 */
	if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) {
		mutex_exit(&thp->th_lock);
		goto top;
	}

	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid) {
			*mpp = &thp->th_lock;
			return (ldp);
		}
	}

	mutex_exit(&thp->th_lock);
	return (NULL);
}

/*
 * Update the indicated LWP usage statistic for the current LWP.
 */
void
lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc)
{
	klwp_t *lwp = ttolwp(curthread);

	if (lwp == NULL)
		return;

	switch (lwp_stat_id) {
	case LWP_STAT_INBLK:
		lwp->lwp_ru.inblock += inc;
		break;
	case LWP_STAT_OUBLK:
		lwp->lwp_ru.oublock += inc;
		break;
	case LWP_STAT_MSGRCV:
		lwp->lwp_ru.msgrcv += inc;
		break;
	case LWP_STAT_MSGSND:
		lwp->lwp_ru.msgsnd += inc;
		break;
	default:
		panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id);
	}
}