/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/vmparam.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/door.h>
#include <vm/seg_kp.h>
#include <sys/debug.h>
#include <sys/tnf.h>
#include <sys/schedctl.h>
#include <sys/poll.h>
#include <sys/copyops.h>
#include <sys/lwp_upimutex_impl.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/brand.h>
#include <sys/cyclic.h>
#include <sys/pool.h>

/* hash function for the lwpid hash table, p->p_tidhash[] */
#define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))
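
/*
 * hash_sz is always a power of two (see the table-growth comments in
 * lwp_create() below), so TIDHASH() is just a mask of the lwpid's
 * low-order bits; e.g. with hash_sz == 8, TIDHASH(9, 8) == (9 & 7) == 1,
 * so lwpids 1 and 9 share a hash bucket.
 */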

void *segkp_lwp;		/* cookie for pool of segkp resources */
extern void reapq_move_lq_to_tq(kthread_t *);
extern void freectx_ctx(struct ctxop *);

/*
 * Create a kernel thread associated with a particular system process.  Give
 * it an LWP so that microstate accounting will be available for it.
 */
kthread_t *
lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
{
	klwp_t *lwp;

	VERIFY((p->p_flag & SSYS) != 0);

	lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);

	VERIFY(lwp != NULL);

	return (lwptot(lwp));
}
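
/*
 * A typical (hypothetical) use, creating a runnable kernel thread at a
 * system priority in an existing system process:
 *
 *	t = lwp_kernel_create(p, worker_func, arg, TS_RUN, minclsyspri);
 *
 * The process must have SSYS set or the VERIFY above will fire.
 */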

/*
 * Create a thread that appears to be stopped at sys_rtt.
 */
klwp_t *
lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
    int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
{
	klwp_t *lwp = NULL;
	kthread_t *t;
	kthread_t *tx;
	cpupart_t *oldpart = NULL;
	size_t stksize;
	caddr_t lwpdata = NULL;
	processorid_t binding;
	int err = 0;
	kproject_t *oldkpj, *newkpj;
	void *bufp = NULL;
	klwp_t *curlwp;
	lwpent_t *lep;
	lwpdir_t *old_dir = NULL;
	uint_t old_dirsz = 0;
	tidhash_t *old_hash = NULL;
	uint_t old_hashsz = 0;
	ret_tidhash_t *ret_tidhash = NULL;
	int i;
	int rctlfail = 0;
	boolean_t branded = 0;
	struct ctxop *ctx = NULL;

	ASSERT(cid != sysdccid);	/* system threads must start in SYS */

	ASSERT(p != &p0);		/* No new LWPs in p0. */

	mutex_enter(&p->p_lock);
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	/*
	 * don't enforce rctl limits on system processes
	 */
	if (!CLASS_KERNEL(cid)) {
		if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
			if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
		if (p->p_task->tk_proj->kpj_nlwps >=
		    p->p_task->tk_proj->kpj_nlwps_ctl)
			if (rctl_test(rc_project_nlwps,
			    p->p_task->tk_proj->kpj_rctls, p, 1, 0)
			    & RCT_DENY)
				rctlfail = 1;
		if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
			if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
	}
	if (rctlfail) {
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		mutex_exit(&p->p_lock);
		return (NULL);
	}
	p->p_task->tk_nlwps++;
	p->p_task->tk_proj->kpj_nlwps++;
	p->p_zone->zone_nlwps++;
	mutex_exit(&p->p_zone->zone_nlwps_lock);
	mutex_exit(&p->p_lock);

	curlwp = ttolwp(curthread);
	if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
		stksize = lwp_default_stksize;

	if (CLASS_KERNEL(cid)) {
		/*
		 * Since we are creating an LWP in an SSYS process, we do not
		 * inherit anything from the current thread's LWP.  We set
		 * stksize and lwpdata to 0 in order to let thread_create()
		 * allocate a regular kernel thread stack for this thread.
		 */
		curlwp = NULL;
		stksize = 0;
		lwpdata = NULL;

	} else if (stksize == lwp_default_stksize) {
		/*
		 * Try to reuse an <lwp,stack> from the LWP deathrow.
		 */
		if (lwp_reapcnt > 0) {
			mutex_enter(&reaplock);
			if ((t = lwp_deathrow) != NULL) {
				ASSERT(t->t_swap);
				lwp_deathrow = t->t_forw;
				lwp_reapcnt--;
				lwpdata = t->t_swap;
				lwp = t->t_lwp;
				ctx = t->t_ctx;
				t->t_swap = NULL;
				t->t_lwp = NULL;
				t->t_ctx = NULL;
				reapq_move_lq_to_tq(t);
			}
			mutex_exit(&reaplock);
			if (lwp != NULL) {
				lwp_stk_fini(lwp);
			}
			if (ctx != NULL) {
				freectx_ctx(ctx);
			}
		}
		if (lwpdata == NULL &&
		    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			return (NULL);
		}
	} else {
		stksize = roundup(stksize, PAGESIZE);
		if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
		    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			return (NULL);
		}
	}

	/*
	 * Create a thread, initializing the stack pointer
	 */
	t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);

	/*
	 * If a non-NULL stack base is passed in, thread_create() assumes
	 * that the stack might be statically allocated (as opposed to being
	 * allocated from segkp), and so it does not set t_swap.  Since
	 * the lwpdata was allocated from segkp, we must set t_swap to point
	 * to it ourselves.
	 *
	 * This would be less confusing if t_swap had a better name; it really
	 * indicates that the stack is allocated from segkp, regardless of
	 * whether or not it is swappable.
	 */
	if (lwpdata != NULL) {
		ASSERT(!CLASS_KERNEL(cid));
		ASSERT(t->t_swap == NULL);
		t->t_swap = lwpdata;	/* Start of page-able data */
	}

	/*
	 * If the stack and lwp can be reused, mark the thread as such.
	 * When we get to reapq_add() from resume_from_zombie(), these
	 * threads will go onto lwp_deathrow instead of thread_deathrow.
	 */
	if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize)
		t->t_flag |= T_LWPREUSE;

	if (lwp == NULL)
		lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
	bzero(lwp, sizeof (*lwp));
	t->t_lwp = lwp;

	t->t_hold = *smask;
	lwp->lwp_thread = t;
	lwp->lwp_procp = p;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
	if (curlwp != NULL && curlwp->lwp_childstksz != 0)
		lwp->lwp_childstksz = curlwp->lwp_childstksz;

	t->t_stk = lwp_stk_init(lwp, t->t_stk);
	thread_load(t, proc, arg, len);

	/*
	 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
	 */
	if (p->p_rprof_cyclic != CYCLIC_NONE)
		t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);

	if (cid != NOCLASS)
		(void) CL_ALLOC(&bufp, cid, KM_SLEEP);

	/*
	 * Allocate an lwp directory entry for the new lwp.
	 */
	lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);

	mutex_enter(&p->p_lock);
grow:
	/*
	 * Grow the lwp (thread) directory and lwpid hash table if necessary.
	 * A note on the growth algorithm:
	 *	The new lwp directory size is computed as:
	 *		new = 2 * old + 2
	 *	Starting with an initial size of 2 (see exec_common()),
	 *	this yields numbers that are a power of two minus 2:
	 *		2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
	 *	The size of the lwpid hash table must be a power of two
	 *	and must be commensurate in size with the lwp directory
	 *	so that hash bucket chains remain short.  Therefore,
	 *	the lwpid hash table size is computed as:
	 *		hashsz = (dirsz + 2) / 2
	 *	which leads to these hash table sizes corresponding to
	 *	the above directory sizes:
	 *		2, 4, 8, 16, 32, 64, 128, 256, 512, ...
	 * A note on growing the hash table:
	 *	For performance reasons, code in lwp_unpark() does not
	 *	acquire curproc->p_lock when searching the hash table.
	 *	Rather, it calls lwp_hash_lookup_and_lock() which
	 *	acquires only the individual hash bucket lock, taking
	 *	care to deal with reallocation of the hash table
	 *	during the time it takes to acquire the lock.
	 *
	 *	This is sufficient to protect the integrity of the
	 *	hash table, but it requires us to acquire all of the
	 *	old hash bucket locks before growing the hash table
	 *	and to release them afterwards.  It also requires us
	 *	not to free the old hash table because some thread
	 *	in lwp_hash_lookup_and_lock() might still be trying
	 *	to acquire the old bucket lock.
	 *
	 *	So we adopt the tactic of keeping all of the retired
	 *	hash tables on a linked list, so they can be safely
	 *	freed when the process exits or execs.
	 *
	 *	Because the hash table grows in powers of two, the
	 *	total size of all of the hash tables will be slightly
	 *	less than twice the size of the largest hash table.
	 */
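	/*
	 * For example, growing from dirsz == 14 yields new_dirsz == 30
	 * and new_hashsz == 16; the retired hash tables kept on the list
	 * then total 2 + 4 + 8 == 14 buckets, less than the 16 in use.
	 */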
	while (p->p_lwpfree == NULL) {
		uint_t dirsz = p->p_lwpdir_sz;
		lwpdir_t *new_dir;
		uint_t new_dirsz;
		lwpdir_t *ldp;
		tidhash_t *new_hash;
		uint_t new_hashsz;

		mutex_exit(&p->p_lock);

		/*
		 * Prepare to remember the old p_tidhash for later
		 * kmem_free()ing when the process exits or execs.
		 */
		if (ret_tidhash == NULL)
			ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
			    KM_SLEEP);
		if (old_dir != NULL)
			kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
		if (old_hash != NULL)
			kmem_free(old_hash, old_hashsz * sizeof (*old_hash));

		new_dirsz = 2 * dirsz + 2;
		new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
		for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
			ldp->ld_next = ldp + 1;
		new_hashsz = (new_dirsz + 2) / 2;
		new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
		    KM_SLEEP);

		mutex_enter(&p->p_lock);
		if (p == curproc)
			prbarrier(p);

		if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
			/*
			 * Someone else beat us to it or some lwp exited.
			 * Set up to free our memory and take a lap.
			 */
			old_dir = new_dir;
			old_dirsz = new_dirsz;
			old_hash = new_hash;
			old_hashsz = new_hashsz;
		} else {
			/*
			 * For the benefit of lwp_hash_lookup_and_lock(),
			 * called from lwp_unpark(), which searches the
			 * tid hash table without acquiring p->p_lock,
			 * we must acquire all of the tid hash table
			 * locks before replacing p->p_tidhash.
			 */
			old_hash = p->p_tidhash;
			old_hashsz = p->p_tidhash_sz;
			for (i = 0; i < old_hashsz; i++) {
				mutex_enter(&old_hash[i].th_lock);
				mutex_enter(&new_hash[i].th_lock);
			}

			/*
			 * We simply hash in all of the old directory entries.
			 * This works because the old directory has no empty
			 * slots and the new hash table starts out empty.
			 * This reproduces the original directory ordering
			 * (required for /proc directory semantics).
			 */
			old_dir = p->p_lwpdir;
			old_dirsz = p->p_lwpdir_sz;
			p->p_lwpdir = new_dir;
			p->p_lwpfree = new_dir;
			p->p_lwpdir_sz = new_dirsz;
			for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
				lwp_hash_in(p, ldp->ld_entry,
				    new_hash, new_hashsz, 0);

			/*
			 * Remember the old hash table along with all
			 * of the previously-remembered hash tables.
			 * We will free them at process exit or exec.
			 */
			ret_tidhash->rth_tidhash = old_hash;
			ret_tidhash->rth_tidhash_sz = old_hashsz;
			ret_tidhash->rth_next = p->p_ret_tidhash;
			p->p_ret_tidhash = ret_tidhash;

			/*
			 * Now establish the new tid hash table.
			 * As soon as we assign p->p_tidhash,
			 * code in lwp_unpark() can start using it.
			 */
			membar_producer();
			p->p_tidhash = new_hash;

			/*
			 * It is necessary that p_tidhash reach global
			 * visibility before p_tidhash_sz.  Otherwise,
			 * code in lwp_hash_lookup_and_lock() could
			 * index into the old p_tidhash using the new
			 * p_tidhash_sz and thereby access invalid data.
			 */
			membar_producer();
			p->p_tidhash_sz = new_hashsz;

			/*
			 * Release the locks; allow lwp_unpark() to carry on.
			 */
			for (i = 0; i < old_hashsz; i++) {
				mutex_exit(&old_hash[i].th_lock);
				mutex_exit(&new_hash[i].th_lock);
			}

			/*
			 * Avoid freeing these objects below.
			 */
			ret_tidhash = NULL;
			old_hash = NULL;
			old_hashsz = 0;
		}
	}

	/*
	 * Block the process against /proc while we manipulate p->p_tlist,
	 * unless lwp_create() was called by /proc for the PCAGENT operation.
	 * We want to do this early enough so that we don't drop p->p_lock
	 * until the thread is put on the p->p_tlist.
	 */
	if (p == curproc) {
		prbarrier(p);
		/*
		 * If the current lwp has been requested to stop, do so now.
		 * Otherwise we have a race condition between /proc attempting
		 * to stop the process and this thread creating a new lwp
		 * that was not seen when the /proc PCSTOP request was issued.
		 * We rely on stop() to call prbarrier(p) before returning.
		 */
		while ((curthread->t_proc_flag & TP_PRSTOP) &&
		    !ttolwp(curthread)->lwp_nostop) {
			/*
			 * We called pool_barrier_enter() before calling
			 * here to lwp_create().  We have to call
			 * pool_barrier_exit() before stopping.
			 */
			pool_barrier_exit();
			prbarrier(p);
			stop(PR_REQUESTED, 0);
			/*
			 * And we have to repeat the call to
			 * pool_barrier_enter after stopping.
			 */
			pool_barrier_enter();
			prbarrier(p);
		}

		/*
		 * If the process is exiting, there could be a race between
		 * the agent lwp creation and the new lwp currently being
		 * created.  To prevent this race, lwp creation fails if the
		 * process is exiting.
		 */
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			err = 1;
			goto error;
		}

		/*
		 * Since we might have dropped p->p_lock, the
		 * lwp directory free list might have changed.
		 */
		if (p->p_lwpfree == NULL)
			goto grow;
	}

	kpreempt_disable();	/* can't grab cpu_lock here */

	/*
	 * Inherit processor and processor set bindings from curthread.
	 *
	 * For kernel LWPs, we do not inherit processor set bindings at
	 * process creation time (i.e. when p != curproc).  After the
	 * kernel process is created, any subsequent LWPs must be created
	 * by threads in the kernel process, at which point we *will*
	 * inherit processor set bindings.
	 */
	if (CLASS_KERNEL(cid) && p != curproc) {
		t->t_bind_cpu = binding = PBIND_NONE;
		t->t_cpupart = oldpart = &cp_default;
		t->t_bind_pset = PS_NONE;
		t->t_bindflag = (uchar_t)default_binding_mode;
	} else {
		binding = curthread->t_bind_cpu;
		t->t_bind_cpu = binding;
		oldpart = t->t_cpupart;
		t->t_cpupart = curthread->t_cpupart;
		t->t_bind_pset = curthread->t_bind_pset;
		t->t_bindflag = curthread->t_bindflag |
		    (uchar_t)default_binding_mode;
	}

	/*
	 * thread_create() initializes this thread's home lgroup to the root.
	 * Choose a more suitable lgroup, since this thread is associated
	 * with an lwp.
	 */
	ASSERT(oldpart != NULL);
	if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
		t->t_bound_cpu = cpu[binding];
		if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
			lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
	} else if (CLASS_KERNEL(cid)) {
		/*
		 * Kernel threads are always in the root lgrp.
		 */
		lgrp_move_thread(t,
		    &t->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1);
	} else {
		lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
	}

	kpreempt_enable();

	/*
	 * make sure lpl points to our own partition
	 */
	ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
	ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
	    t->t_cpupart->cp_nlgrploads);

	/*
	 * It is safe to point the thread to the new project without holding it
	 * since we're holding the target process' p_lock here and therefore
	 * we're guaranteed that it will not move to another project.
	 */
	newkpj = p->p_task->tk_proj;
	oldkpj = ttoproj(t);
	if (newkpj != oldkpj) {
		t->t_proj = newkpj;
		(void) project_hold(newkpj);
		project_rele(oldkpj);
	}

	if (cid != NOCLASS) {
		/*
		 * If the lwp is being created in the current process
		 * and matches the current thread's scheduling class,
		 * we should propagate the current thread's scheduling
		 * parameters by calling CL_FORK.  Otherwise just use
		 * the defaults by calling CL_ENTERCLASS.
		 */
		if (p != curproc || curthread->t_cid != cid) {
			err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
			t->t_pri = pri;	/* CL_ENTERCLASS may have changed it */
			/*
			 * We don't call schedctl_set_cidpri(t) here
			 * because the schedctl data is not yet set
			 * up for the newly-created lwp.
			 */
		} else {
			t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
			err = CL_FORK(curthread, t, bufp);
			t->t_cid = cid;
		}
		if (err)
			goto error;
		else
			bufp = NULL;
	}

	/*
	 * If we were given an lwpid then use it, else allocate one.
	 */
	if (lwpid != 0)
		t->t_tid = lwpid;
	else {
		/*
		 * lwp/thread id 0 is never valid; reserved for special checks.
		 * lwp/thread id 1 is reserved for the main thread.
		 * Start again at 2 when INT_MAX has been reached
		 * (id_t is a signed 32-bit integer).
		 */
		id_t prev_id = p->p_lwpid;	/* last allocated tid */

		do {			/* avoid lwpid duplication */
			if (p->p_lwpid == INT_MAX) {
				p->p_flag |= SLWPWRAP;
				p->p_lwpid = 1;
			}
			if ((t->t_tid = ++p->p_lwpid) == prev_id) {
				/*
				 * All lwpids are allocated; fail the request.
				 */
				err = 1;
				goto error;
			}
			/*
			 * We only need to worry about colliding with an id
			 * that's already in use if this process has
			 * cycled through all available lwp ids.
			 */
			if ((p->p_flag & SLWPWRAP) == 0)
				break;
		} while (lwp_hash_lookup(p, t->t_tid) != NULL);
	}
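
	/*
	 * To illustrate the wrap case: once p_lwpid reaches INT_MAX,
	 * SLWPWRAP is set and allocation restarts at 2; from then on
	 * every candidate id is checked with lwp_hash_lookup() and only
	 * an id freed by an exited lwp is accepted.
	 */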

	/*
	 * If this is a branded process, let the brand do any necessary lwp
	 * initialization.
	 */
	if (PROC_IS_BRANDED(p)) {
		if (BROP(p)->b_initlwp(lwp)) {
			err = 1;
			goto error;
		}
		branded = 1;
	}

	if (t->t_tid == 1) {
		kpreempt_disable();
		ASSERT(t->t_lpl != NULL);
		p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
		kpreempt_enable();
		if (p->p_tr_lgrpid != LGRP_NONE &&
		    p->p_tr_lgrpid != p->p_t1_lgrpid) {
			lgrp_update_trthr_migrations(1);
		}
	}

	p->p_lwpcnt++;
	t->t_waitfor = -1;

	/*
	 * Turn microstate accounting on for thread if on for process.
	 */
	if (p->p_flag & SMSACCT)
		t->t_proc_flag |= TP_MSACCT;

	/*
	 * If the process has watchpoints, mark the new thread as such.
	 */
	if (pr_watch_active(p))
		watch_enable(t);

	/*
	 * The lwp is being created in the stopped state.
	 * We set all the necessary flags to indicate that fact here.
	 * We omit the TS_CREATE flag from t_schedflag so that the lwp
	 * cannot be set running until the caller is finished with it,
	 * even if lwp_continue() is called on it after we drop p->p_lock.
	 * When the caller is finished with the newly-created lwp,
	 * the caller must call lwp_create_done() to allow the lwp
	 * to be set running.  If the TP_HOLDLWP flag is left set, the
	 * lwp will suspend itself after reaching system call exit.
	 */
	init_mstate(t, LMS_STOPPED);
	t->t_proc_flag |= TP_HOLDLWP;
	t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
	t->t_whystop = PR_SUSPENDED;
	t->t_whatstop = SUSPEND_NORMAL;
	t->t_sig_check = 1;	/* ensure that TP_HOLDLWP is honored */

	/*
	 * Set system call processing flags in case tracing or profiling
	 * is set.  The first system call will evaluate these and turn
	 * them off if they aren't needed.
	 */
	t->t_pre_sys = 1;
	t->t_post_sys = 1;

	/*
	 * Insert the new thread into the list of all threads.
	 */
	if ((tx = p->p_tlist) == NULL) {
		t->t_back = t;
		t->t_forw = t;
		p->p_tlist = t;
	} else {
		t->t_forw = tx;
		t->t_back = tx->t_back;
		tx->t_back->t_forw = t;
		tx->t_back = t;
	}

	/*
	 * Insert the new lwp into an lwp directory slot position
	 * and into the lwpid hash table.
	 */
	lep->le_thread = t;
	lep->le_lwpid = t->t_tid;
	lep->le_start = t->t_start;
	lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);

	if (state == TS_RUN) {
		/*
		 * We set the new lwp running immediately.
		 */
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
	}

error:
	if (err) {
		if (CLASS_KERNEL(cid)) {
			/*
			 * This should only happen if a system process runs
			 * out of lwpids, which shouldn't occur.
			 */
			panic("Failed to create a system LWP");
		}
		/*
		 * We have failed to create an lwp, so decrement the number
		 * of lwps in the task and let the lgroup load averages know
		 * that this thread isn't going to show up.
		 */
		kpreempt_disable();
		lgrp_move_thread(t, NULL, 1);
		kpreempt_enable();

		ASSERT(MUTEX_HELD(&p->p_lock));
		mutex_enter(&p->p_zone->zone_nlwps_lock);
		p->p_task->tk_nlwps--;
		p->p_task->tk_proj->kpj_nlwps--;
		p->p_zone->zone_nlwps--;
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		if (cid != NOCLASS && bufp != NULL)
			CL_FREE(cid, bufp);

		if (branded)
			BROP(p)->b_freelwp(lwp);

		mutex_exit(&p->p_lock);
		t->t_state = TS_FREE;
		thread_rele(t);

		/*
		 * We need to remove t from the list of all threads
		 * because thread_exit()/lwp_exit() isn't called on t.
		 */
		mutex_enter(&pidlock);
		ASSERT(t != t->t_next);		/* t0 never exits */
		t->t_next->t_prev = t->t_prev;
		t->t_prev->t_next = t->t_next;
		mutex_exit(&pidlock);

		thread_free(t);
		kmem_free(lep, sizeof (*lep));
		lwp = NULL;
	} else {
		mutex_exit(&p->p_lock);
	}

	if (old_dir != NULL)
		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
	if (old_hash != NULL)
		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
	if (ret_tidhash != NULL)
		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));

	DTRACE_PROC1(lwp__create, kthread_t *, t);
	return (lwp);
}
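
/*
 * A sketch of the caller-side protocol for a stopped creation
 * (hypothetical caller; cf. the TS_CREATE discussion above and
 * lwp_create_done() below).  lwp_create() drops p->p_lock before
 * returning; lwp_create_done() requires it to be held:
 *
 *	lwp = lwp_create(func, arg, len, p, TS_STOPPED, pri,
 *	    &curthread->t_hold, cid, 0);
 *	if (lwp == NULL)
 *		return (EAGAIN);	/* hypothetical error handling */
 *	... finish initializing the new lwp ...
 *	mutex_enter(&p->p_lock);
 *	lwp_create_done(lwptot(lwp));
 *	mutex_exit(&p->p_lock);
 */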

/*
 * lwp_create_done() is called by the caller of lwp_create() to set the
 * newly-created lwp running after the caller has finished manipulating it.
 */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Copy an LWP's active templates, and clear the latest contracts.
 */
void
lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
{
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
		dst->lwp_ct_latest[i] = NULL;
	}
}

/*
 * Clear an LWP's contract template state.
 */
void
lwp_ctmpl_clear(klwp_t *lwp)
{
	ct_template_t *tmpl;
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
			ctmpl_free(tmpl);
			lwp->lwp_ct_active[i] = NULL;
		}

		if (lwp->lwp_ct_latest[i] != NULL) {
			contract_rele(lwp->lwp_ct_latest[i]);
			lwp->lwp_ct_latest[i] = NULL;
		}
	}
}
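
/*
 * Within this file, lwp_ctmpl_copy() is used by forklwp() to give a
 * child lwp duplicates of its parent's active templates, and
 * lwp_ctmpl_clear() is used by lwp_cleanup() to tear this state down
 * when an lwp exits.
 */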

/*
 * Individual lwp exit.
 * If this is the last lwp, exit the whole process.
 */
void
lwp_exit(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_exit(&p->p_lock);

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully abandoned.
	 */
	trash_user_windows();
#endif

	tsd_exit();		/* free thread specific data */

	kcpc_passivate();	/* Clean up performance counter state */

	pollcleanup();

	if (t->t_door)
		door_slam();

	if (t->t_schedctl != NULL)
		schedctl_lwp_cleanup(t);

	if (t->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Perform any brand specific exit processing, then release any
	 * brand data associated with the lwp
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwpexit(lwp);

	mutex_enter(&p->p_lock);
	lwp_cleanup();

	/*
	 * When this process is dumping core, its lwps are held here
	 * until the core dump is finished.  Then exitlwps() is called
	 * again to release these lwps so that they can finish exiting.
	 */
	if (p->p_flag & SCOREDUMP)
		stop(PR_SUSPENDED, SUSPEND_NORMAL);

	/*
	 * Call proc_exit() if this is the last non-daemon lwp in the process.
	 */
	if (!(t->t_proc_flag & TP_DAEMON) &&
	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
		mutex_exit(&p->p_lock);
		if (proc_exit(CLD_EXITED, 0) == 0) {
			/* Restarting init. */
			return;
		}

		/*
		 * proc_exit() returns a non-zero value when some other
		 * lwp got there first.  We just have to continue in
		 * lwp_exit().
		 */
		mutex_enter(&p->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
	}

	mutex_exit(&p->p_lock);

	lwp_pcb_exit();

	mutex_enter(&p->p_lock);

	/*
	 * Block the process against /proc now that we have really acquired
	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
	 */
	prbarrier(p);

	DTRACE_PROC(lwp__exit);

	/*
	 * If the lwp is a detached lwp or if the process is exiting,
	 * remove (lwp_hash_out()) the lwp from the lwp directory.
	 * Otherwise null out the lwp's le_thread pointer in the lwp
	 * directory so that other threads will see it as a zombie lwp.
	 */
	prlwpexit(t);		/* notify /proc */
	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
		lwp_hash_out(p, t->t_tid);
	else {
		ASSERT(!(t->t_proc_flag & TP_DAEMON));
		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
		p->p_zombcnt++;
		cv_broadcast(&p->p_lwpexit);
	}
	if (t->t_proc_flag & TP_DAEMON) {
		p->p_lwpdaemon--;
		t->t_proc_flag &= ~TP_DAEMON;
	}
	t->t_proc_flag &= ~TP_TWAIT;

	/*
	 * Maintain accurate lwp count for task.max-lwps resource control.
	 */
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	p->p_task->tk_nlwps--;
	p->p_task->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	CL_EXIT(t);		/* tell the scheduler that t is exiting */
	ASSERT(p->p_lwpcnt != 0);
	p->p_lwpcnt--;

	/*
	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
	 * wake them up so someone can return EDEADLK.
	 * (See the block comment preceding lwp_wait().)
	 */
	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
		cv_broadcast(&p->p_lwpexit);

	t->t_proc_flag |= TP_LWPEXIT;
	term_mstate(t);

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	t->t_forw->t_back = t->t_back;
	t->t_back->t_forw = t->t_forw;
	if (t == p->p_tlist)
		p->p_tlist = t->t_forw;

	/*
	 * Clean up the signal state.
	 */
	if (t->t_sigqueue != NULL)
		sigdelq(p, t, 0);
	if (lwp->lwp_curinfo != NULL) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	thread_rele(t);

	/*
	 * Terminated lwps are associated with process zero and are put onto
	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
	 */
	t->t_preempt++;

	if (t->t_ctx != NULL)
		exitctx(t);
	if (p->p_pctx != NULL)
		exitpctx(p);

	t->t_procp = &p0;

	/*
	 * Notify the HAT about the change of address space
	 */
	hat_thread_exit(t);
	/*
	 * When this is the last running lwp in this process and some lwp is
	 * waiting for this condition to become true, or this thread was being
	 * suspended, then the waiting lwp is awakened.
	 *
	 * Also, if the process is exiting, we may have a thread waiting in
	 * exitlwps() that needs to be notified.
	 */
	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
	    (p->p_flag & SEXITLWPS))
		cv_broadcast(&p->p_holdlwps);

	/*
	 * Need to drop p_lock so we can reacquire pidlock.
	 */
	mutex_exit(&p->p_lock);
	mutex_enter(&pidlock);

	ASSERT(t != t->t_next);		/* t0 never exits */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	t->t_state = TS_ZOMB;
	swtch_from_zombie();
	/* never returns */
}

/*
 * Cleanup function for an exiting lwp.
 * Called both from lwp_exit() and from proc_exit().
 * p->p_lock is repeatedly released and grabbed in this function.
 */
void
lwp_cleanup(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* untimeout any lwp-bound realtime timers */
	if (p->p_itimer != NULL)
		timer_lwpexit();

	/*
	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
	 * so it appears that the agent never existed, and clear p_agenttp.
	 */
	if (t == p->p_agenttp) {
		ASSERT(t->t_tid == p->p_lwpid);
		p->p_lwpid--;
		p->p_agenttp = NULL;
	}

	/*
	 * Do lgroup bookkeeping to account for thread exiting.
	 */
	kpreempt_disable();
	lgrp_move_thread(t, NULL, 1);
	if (t->t_tid == 1) {
		p->p_t1_lgrpid = LGRP_NONE;
	}
	kpreempt_enable();

	lwp_ctmpl_clear(ttolwp(t));
}

int
lwp_suspend(kthread_t *t)
{
	int tid;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
	 * If an lwp is stopping itself, there is no need to wait.
	 */
top:
	t->t_proc_flag |= TP_HOLDLWP;
	if (t == curthread) {
		t->t_sig_check = 1;
	} else {
		/*
		 * Make sure the lwp stops promptly.
		 */
		thread_lock(t);
		t->t_sig_check = 1;
		/*
		 * XXX Should use virtual stop like /proc does instead of
		 * XXX waking the thread to get it to stop.
		 */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
			poke_cpu(t->t_cpu->cpu_id);
		}

		tid = t->t_tid;		/* remember thread ID */
		/*
		 * Wait for lwp to stop
		 */
		while (!SUSPENDED(t)) {
			/*
			 * Drop the thread lock before waiting and reacquire it
			 * afterwards, so the thread can change its t_state
			 * field.
			 */
			thread_unlock(t);

			/*
			 * Check if aborted by exitlwps().
			 */
			if (p->p_flag & SEXITLWPS)
				lwp_exit();

			/*
			 * Cooperate with jobcontrol signals and /proc stopping
			 * by calling cv_wait_sig() to wait for the target
			 * lwp to stop.  Just using cv_wait() can lead to
			 * deadlock because, if some other lwp has stopped
			 * by either of these mechanisms, then p_lwprcnt will
			 * never become zero if we do a cv_wait().
			 */
			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
				return (EINTR);

			/*
			 * Check to see if thread died while we were
			 * waiting for it to suspend.
			 */
			if (idtot(p, tid) == NULL)
				return (ESRCH);

			thread_lock(t);
			/*
			 * If the TP_HOLDLWP flag went away, lwp_continue()
			 * or vfork() must have been called while we were
			 * waiting, so start over again.
			 */
			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
				thread_unlock(t);
				goto top;
			}
		}
		thread_unlock(t);
	}
	return (0);
}

/*
 * Continue an lwp that's been stopped by lwp_suspend().
 */
void
lwp_continue(kthread_t *t)
{
	proc_t *p = ttoproc(t);
	int was_suspended = t->t_proc_flag & TP_HOLDLWP;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t->t_proc_flag &= ~TP_HOLDLWP;
	thread_lock(t);
	if (SUSPENDED(t) &&
	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
		p->p_lwprcnt++;
		t->t_schedflag |= TS_CSTART;
		setrun_locked(t);
	}
	thread_unlock(t);
	/*
	 * Wake up anyone waiting for this thread to be suspended
	 */
	if (was_suspended)
		cv_broadcast(&p->p_holdlwps);
}
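
/*
 * A sketch of the suspend/resume pairing as a caller (for example,
 * the syslwp_suspend() path) might use it, with p->p_lock held
 * around each call:
 *
 *	mutex_enter(&p->p_lock);
 *	error = lwp_suspend(t);
 *	mutex_exit(&p->p_lock);
 *	...
 *	mutex_enter(&p->p_lock);
 *	lwp_continue(t);
 *	mutex_exit(&p->p_lock);
 */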

/*
 * ********************************
 * Miscellaneous lwp routines	  *
 * ********************************
 */
/*
 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
 * This will cause the process's lwps to stop at a hold point.  A hold
 * point is where a kernel thread has a flat stack.  This is at the
 * return from a system call and at the return from a user level trap.
 *
 * When a process is undergoing a fork1() or vfork(), its p_flag is set to
 * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
 * hold point.  The lwps in the process are not being cloned, so they
 * are held at the usual hold points and also within issig_forreal().
 * This has the side-effect that their system calls do not return
 * showing EINTR.
 *
 * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
 * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
 * lwp is waiting for the target lwp to be stopped.
 */
void
holdlwp(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;

	mutex_enter(&p->p_lock);
	/*
	 * Don't terminate immediately if the process is dumping core.
	 * Once the process has dumped core, all lwps are terminated.
	 */
	if (!(p->p_flag & SCOREDUMP)) {
		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
			lwp_exit();
	}
	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
		mutex_exit(&p->p_lock);
		return;
	}
	/*
	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
	 * when p->p_lwprcnt becomes zero.
	 */
	stop(PR_SUSPENDED, SUSPEND_NORMAL);
	if (p->p_flag & SEXITLWPS)
		lwp_exit();
	mutex_exit(&p->p_lock);
}

/*
 * Have all lwps within the process hold at a point where they are
 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
 */
int
holdlwps(int holdflag)
{
	proc_t *p = curproc;

	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
again:
	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		/*
		 * If another lwp is doing a forkall() or proc_exit(), bail out.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDFORK)) {
			mutex_exit(&p->p_lock);
			return (0);
		}
		/*
		 * Another lwp is doing a fork1() or is undergoing
		 * watchpoint activity.  We hold here for it to complete.
		 */
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
	}
	p->p_flag |= holdflag;
	pokelwps(p);
	--p->p_lwprcnt;
	/*
	 * Wait for the process to become quiescent (p->p_lwprcnt == 0).
	 */
	while (p->p_lwprcnt > 0) {
		/*
		 * Check if aborted by exitlwps().
		 * Also check if SHOLDWATCH is set; it takes precedence.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) {
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			cv_broadcast(&p->p_holdlwps);
			goto again;
		}
		/*
		 * Cooperate with jobcontrol signals and /proc stopping.
		 * If some other lwp has stopped by either of these
		 * mechanisms, then p_lwprcnt will never become zero
		 * and the process will appear deadlocked unless we
		 * stop here in sympathy with the other lwp before
		 * doing the cv_wait() below.
		 *
		 * If the other lwp stops after we do the cv_wait(), it
		 * will wake us up to loop around and do the sympathy stop.
		 *
		 * Since stop() drops p->p_lock, we must start from
		 * the top again on returning from stop().
		 */
		if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) {
			int whystop = p->p_stopsig ? PR_JOBCONTROL :
			    PR_REQUESTED;
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			stop(whystop, p->p_stopsig);
			goto again;
		}
		cv_wait(&p->p_holdlwps, &p->p_lock);
	}
	p->p_lwprcnt++;
	p->p_flag &= ~holdflag;
	mutex_exit(&p->p_lock);
	return (1);
}

/*
 * See comments for holdwatch(), below.
 */
static int
holdcheck(int clearflags)
{
	proc_t *p = curproc;

	/*
	 * If we are trying to exit, that takes precedence over anything else.
	 */
	if (p->p_flag & SEXITLWPS) {
		p->p_lwprcnt++;
		p->p_flag &= ~clearflags;
		lwp_exit();
	}

	/*
	 * If another thread is calling fork1(), stop the current thread so the
	 * other can complete.
	 */
	if (p->p_flag & SHOLDFORK1) {
		p->p_lwprcnt++;
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
		if (p->p_flag & SEXITLWPS) {
			p->p_flag &= ~clearflags;
			lwp_exit();
		}
		return (-1);
	}

	/*
	 * If another thread is calling fork(), then indicate we are doing
	 * watchpoint activity.  This will cause holdlwps() above to stop the
	 * forking thread, at which point we can continue with watchpoint
	 * activity.
	 */
	if (p->p_flag & SHOLDFORK) {
		p->p_lwprcnt++;
		while (p->p_flag & SHOLDFORK) {
			p->p_flag |= SHOLDWATCH;
			cv_broadcast(&p->p_holdlwps);
			cv_wait(&p->p_holdlwps, &p->p_lock);
			p->p_flag &= ~SHOLDWATCH;
		}
		return (-1);
	}

	return (0);
}

/*
 * Stop all lwps within the process, holding themselves in the kernel while the
 * active lwp undergoes watchpoint activity.  This is more complicated than
 * expected because stop() relies on calling holdwatch() in order to copyin data
 * from the user's address space.  A double barrier is used to prevent an
 * infinite loop.
 *
 *	o The first thread into holdwatch() is the 'master' thread and does
 *	  the following:
 *
 *		- Sets SHOLDWATCH on the current process
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for all threads to be either stopped or have
 *		  TP_WATCHSTOP set.
 *		- Sets the SWATCHOK flag on the process
 *		- Unsets TP_WATCHSTOP
 *		- Waits for the other threads to completely stop
 *		- Unsets SWATCHOK
 *
 *	o If SHOLDWATCH is already set when we enter this function, then
 *	  another thread is already trying to stop this thread.  This 'slave'
 *	  thread does the following:
 *
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for SWATCHOK flag to be set
 *		- Calls stop()
 *
 *	o If SWATCHOK is set on the process, then this function immediately
 *	  returns, as we must have been called via stop().
 *
 * In addition, there are other flags that take precedence over SHOLDWATCH:
 *
 *	o If SEXITLWPS is set, exit immediately.
 *
 *	o If SHOLDFORK1 is set, wait for fork1() to complete.
 *
 *	o If SHOLDFORK is set, then watchpoint activity takes precedence.
 *	  In this case, set SHOLDWATCH, signalling the forking thread to
 *	  stop first.
 *
 *	o If the process is being stopped via /proc (TP_PRSTOP is set), then
 *	  we stop the current thread.
 *
 * Returns 0 if all threads have been quiesced.  Returns non-zero if not all
 * threads were stopped, or the list of watched pages has changed.
 */
int
holdwatch(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;
	int ret = 0;

	mutex_enter(&p->p_lock);

	p->p_lwprcnt--;

	/*
	 * Check for bail-out conditions as outlined above.
	 */
	if (holdcheck(0) != 0) {
		mutex_exit(&p->p_lock);
		return (-1);
	}

	if (!(p->p_flag & SHOLDWATCH)) {
		/*
		 * We are the master watchpoint thread.  Set SHOLDWATCH and
		 * poke the other threads.
		 */
		p->p_flag |= SHOLDWATCH;
		pokelwps(p);

		/*
		 * Wait for all threads to be stopped or have TP_WATCHSTOP set.
		 */
		while (pr_allstopped(p, 1) > 0) {
			if (holdcheck(SHOLDWATCH) != 0) {
				p->p_flag &= ~SHOLDWATCH;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now stopped or in the process of stopping.
		 * Set SWATCHOK and let them stop completely.
		 */
		p->p_flag |= SWATCHOK;
		t->t_proc_flag &= ~TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (pr_allstopped(p, 0) > 0) {
			/*
			 * At first glance, it may appear that we don't need a
			 * call to holdcheck() here.  But if the process gets a
			 * SIGKILL signal, one of our stopped threads may have
			 * been awakened and is waiting in exitlwps(), which
			 * takes precedence over watchpoints.
			 */
			if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) {
				p->p_flag &= ~(SHOLDWATCH | SWATCHOK);
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now completely stopped.
		 */
		p->p_flag &= ~SWATCHOK;
		p->p_flag &= ~SHOLDWATCH;
		p->p_lwprcnt++;

	} else if (!(p->p_flag & SWATCHOK)) {

		/*
		 * SHOLDWATCH is set, so another thread is trying to do
		 * watchpoint activity.  Indicate this thread is stopping, and
		 * wait for the OK from the master thread.
		 */
		t->t_proc_flag |= TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (!(p->p_flag & SWATCHOK)) {
			if (holdcheck(0) != 0) {
				t->t_proc_flag &= ~TP_WATCHSTOP;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * Once the master thread has given the OK, this thread can
		 * actually call stop().
		 */
		t->t_proc_flag &= ~TP_WATCHSTOP;
		p->p_lwprcnt++;

		stop(PR_SUSPENDED, SUSPEND_NORMAL);

		/*
		 * It's not OK to do watchpoint activity; notify the caller
		 * to retry.
		 */
		ret = -1;

	} else {

		/*
		 * The only way we can hit the case where SHOLDWATCH is set and
		 * SWATCHOK is set is if we are triggering this from within a
		 * stop() call.  Assert that this is the case.
		 */

		ASSERT(t->t_proc_flag & TP_STOPPING);
		p->p_lwprcnt++;
	}

	mutex_exit(&p->p_lock);

	return (ret);
}

/*
 * force all interruptible lwps to trap into the kernel.
 */
void
pokelwps(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_tlist;
	do {
		if (t == curthread)
			continue;
		thread_lock(t);
		aston(t);	/* make thread trap or do post_syscall */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_STOPPED) {
			/*
			 * Ensure that proc_exit() is not blocked by lwps
			 * that were stopped via jobcontrol or /proc.
			 */
			if (p->p_flag & SEXITLWPS) {
				p->p_stopsig = 0;
				t->t_schedflag |= (TS_XSTART | TS_PSTART);
				setrun_locked(t);
			}
			/*
			 * If we are holding lwps for a forkall(),
			 * force lwps that have been suspended via
			 * lwp_suspend() and are suspended inside
			 * of a system call to proceed to their
			 * holdlwp() points where they are clonable.
			 */
			if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) {
				if ((t->t_schedflag & TS_CSTART) == 0) {
					p->p_lwprcnt++;
					t->t_schedflag |= TS_CSTART;
					setrun_locked(t);
				}
			}
		} else if (t->t_state == TS_ONPROC) {
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * undo the effects of holdlwps() or holdwatch().
 */
void
continuelwps(proc_t *p)
{
	kthread_t *t;

	/*
	 * If this flag is set, then the original holdwatch() didn't actually
	 * stop the process.  See comments for holdwatch().
	 */
	if (p->p_flag & SWATCHOK) {
		ASSERT(curthread->t_proc_flag & TP_STOPPING);
		return;
	}

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0);

	t = p->p_tlist;
	do {
		thread_lock(t);		/* SUSPENDED looks at t_schedflag */
		if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) {
			p->p_lwprcnt++;
			t->t_schedflag |= TS_CSTART;
			setrun_locked(t);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * Force all LWPs in the current process other than the caller to exit,
 * and then cv_wait() on p_holdlwps for them to exit.  The exitlwps()
 * function is typically used in these situations:
 *
 *   (a) prior to an exec() system call
 *   (b) prior to dumping a core file
 *   (c) prior to a uadmin() shutdown
 *
 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed.
 * Multiple threads in the process can call this function at one time by
 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used
 * to declare one particular thread the winner who gets to kill the others.
 * If a thread wins the exitlwps() dance, zero is returned; otherwise an
 * appropriate errno value is returned to caller for its system call to return.
 */
int
exitlwps(int coredump)
{
	proc_t *p = curproc;
	int heldcnt;

	if (curthread->t_door)
		door_slam();
	if (p->p_door_list)
		door_revoke_all();
	if (curthread->t_schedctl != NULL)
		schedctl_lwp_cleanup(curthread);

	/*
	 * Ensure that before starting to wait for other lwps to exit,
	 * cleanup all upimutexes held by curthread.  Otherwise, some other
	 * lwp could be waiting (uninterruptibly) for a upimutex held by
	 * curthread, and the call to pokelwps() below would deadlock.
	 * Even if a blocked upimutex_lock is made interruptible,
	 * curthread's upimutexes need to be unlocked: do it here.
	 */
	if (curthread->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Grab p_lock in order to check and set SEXITLWPS to declare a winner.
	 * We must also block any further /proc access from this point forward.
	 */
	mutex_enter(&p->p_lock);
	prbarrier(p);

	if (p->p_flag & SEXITLWPS) {
		mutex_exit(&p->p_lock);
		aston(curthread);	/* force a trip through post_syscall */
		return (set_errno(EINTR));
	}

	p->p_flag |= SEXITLWPS;
	if (coredump)		/* tell other lwps to stop, not exit */
		p->p_flag |= SCOREDUMP;

	/*
	 * Give precedence to exitlwps() if a holdlwps() is
	 * in progress.  The lwp doing the holdlwps() operation
	 * is aborted when it is awakened.
	 */
	while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		cv_broadcast(&p->p_holdlwps);
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_flag |= SHOLDFORK;
	pokelwps(p);

	/*
	 * Wait for process to become quiescent.
	 */
	--p->p_lwprcnt;
	while (p->p_lwprcnt > 0) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_lwprcnt++;
	ASSERT(p->p_lwprcnt == 1);

	/*
	 * The SCOREDUMP flag puts the process into a quiescent
	 * state.  The process's lwps remain attached to this
	 * process until exitlwps() is called again without the
	 * 'coredump' flag set, then the lwps are terminated
	 * and the process can exit.
	 */
	if (coredump) {
		p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS);
		goto out;
	}

	/*
	 * Determine if there are any lwps left dangling in
	 * the stopped state.  This happens when exitlwps()
	 * aborts a holdlwps() operation.
	 */
	p->p_flag &= ~SHOLDFORK;
	if ((heldcnt = p->p_lwpcnt) > 1) {
		kthread_t *t;
		for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) {
			t->t_proc_flag &= ~TP_TWAIT;
			lwp_continue(t);
		}
	}

	/*
	 * Wait for all other lwps to exit.
	 */
	--p->p_lwprcnt;
	while (p->p_lwpcnt > 1) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	++p->p_lwprcnt;
	ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1);

	p->p_flag &= ~SEXITLWPS;
	curthread->t_proc_flag &= ~TP_TWAIT;

out:
	if (!coredump && p->p_zombcnt) {	/* cleanup the zombie lwps */
		lwpdir_t *ldp;
		lwpent_t *lep;
		int i;

		for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) {
			lep = ldp->ld_entry;
			if (lep != NULL && lep->le_thread != curthread) {
				ASSERT(lep->le_thread == NULL);
				p->p_zombcnt--;
				lwp_hash_out(p, lep->le_lwpid);
			}
		}
		ASSERT(p->p_zombcnt == 0);
	}

	/*
	 * If some other LWP in the process wanted us to suspend ourself,
	 * then we will not do it.  The other LWP is now terminated and
	 * no one will ever continue us again if we suspend ourself.
	 */
	curthread->t_proc_flag &= ~TP_HOLDLWP;
	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
	mutex_exit(&p->p_lock);
	return (0);
}

/*
 * Duplicate an lwp.
 */
klwp_t *
forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
{
	klwp_t *clwp;
	void *tregs, *tfpu;
	kthread_t *t = lwptot(lwp);
	kthread_t *ct;
	proc_t *p = lwptoproc(lwp);
	int cid;
	void *bufp;
	void *brand_data;
	int val;

	ASSERT(p == curproc);
	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));

#if defined(__sparc)
	if (t == curthread)
		(void) flush_user_windows_to_stack(NULL);
#endif

	if (t == curthread)
		/* copy args out of registers first */
		(void) save_syscall_args();

	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
	if (clwp == NULL)
		return (NULL);

	/*
	 * most of the parent's lwp can be copied to its duplicate,
	 * except for the fields that are unique to each lwp, like
	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
	 */
	ct = clwp->lwp_thread;
	tregs = clwp->lwp_regs;
	tfpu = clwp->lwp_fpu;
	brand_data = clwp->lwp_brand;

	/*
	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
	 * mstate_aggr_state() from reading stale mstate entries copied
	 * from lwp to clwp.
	 */
	mutex_enter(&cp->p_lock);
	*clwp = *lwp;

	/* clear microstate and resource usage data in new lwp */
	init_mstate(ct, LMS_STOPPED);
	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
	mutex_exit(&cp->p_lock);

	/* fix up child's lwp */

	clwp->lwp_pcb.pcb_flags = 0;
#if defined(__sparc)
	clwp->lwp_pcb.pcb_step = STEP_NONE;
#endif
	clwp->lwp_cursig = 0;
	clwp->lwp_extsig = 0;
	clwp->lwp_curinfo = (struct sigqueue *)0;
	clwp->lwp_thread = ct;
	ct->t_sysnum = t->t_sysnum;
	clwp->lwp_regs = tregs;
	clwp->lwp_fpu = tfpu;
	clwp->lwp_brand = brand_data;
	clwp->lwp_ap = clwp->lwp_arg;
	clwp->lwp_procp = cp;
	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
	clwp->lwp_lastfault = 0;
	clwp->lwp_lastfaddr = 0;

	/* copy parent's struct regs to child. */
	lwp_forkregs(lwp, clwp);

	/*
	 * Fork thread context ops, if any.
	 */
	if (t->t_ctx)
		forkctx(t, ct);

	/* fix door state in the child */
	if (t->t_door)
		door_fork(t, ct);

	/* copy current contract templates, clear latest contracts */
	lwp_ctmpl_copy(clwp, lwp);

	mutex_enter(&cp->p_lock);
	/* lwp_create() set the TP_HOLDLWP flag */
	if (!(t->t_proc_flag & TP_HOLDLWP))
		ct->t_proc_flag &= ~TP_HOLDLWP;
	if (cp->p_flag & SMSACCT)
		ct->t_proc_flag |= TP_MSACCT;
	mutex_exit(&cp->p_lock);

	/* Allow brand to propagate brand-specific state */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_forklwp(lwp, clwp);

retry:
	cid = t->t_cid;

	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
	ASSERT(val == 0);

	mutex_enter(&p->p_lock);
	if (cid != t->t_cid) {
		/*
		 * Someone just changed this thread's scheduling class,
		 * so try pre-allocating the buffer again.  Hopefully we
		 * don't hit this often.
		 */
		mutex_exit(&p->p_lock);
		CL_FREE(cid, bufp);
		goto retry;
	}

	ct->t_unpark = t->t_unpark;
	ct->t_clfuncs = t->t_clfuncs;
	CL_FORK(t, ct, bufp);
	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
	mutex_exit(&p->p_lock);

	return (clwp);
}

/*
 * Add a new lwp entry to the lwp directory and to the lwpid hash table.
 */
void
lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
    int do_lock)
{
	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	kthread_t *t;

	/*
	 * Allocate a directory element from the free list.
	 * Code elsewhere guarantees a free slot.
	 */
	ldp = p->p_lwpfree;
	p->p_lwpfree = ldp->ld_next;
	ASSERT(ldp->ld_entry == NULL);
	ldp->ld_entry = lep;

	if (do_lock)
		mutex_enter(&thp->th_lock);

	/*
	 * Insert it into the lwpid hash table.
	 */
	ldpp = &thp->th_list;
	ldp->ld_next = *ldpp;
	*ldpp = ldp;

	/*
	 * Set the active thread's directory slot entry.
	 */
	if ((t = lep->le_thread) != NULL) {
		ASSERT(lep->le_lwpid == t->t_tid);
		t->t_dslot = (int)(ldp - p->p_lwpdir);
	}

	if (do_lock)
		mutex_exit(&thp->th_lock);
}

/*
 * Remove an lwp from the lwpid hash table and free its directory entry.
 * This is done when a detached lwp exits in lwp_exit() or
 * when a non-detached lwp is waited for in lwp_wait() or
 * when a zombie lwp is detached in lwp_detach().
 */
void
lwp_hash_out(proc_t *p, id_t lwpid)
{
	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	lwpent_t *lep;

	mutex_enter(&thp->th_lock);
	for (ldpp = &thp->th_list;
	    (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) {
		lep = ldp->ld_entry;
		if (lep->le_lwpid == lwpid) {
			prlwpfree(p, lep);	/* /proc deals with le_trace */
			*ldpp = ldp->ld_next;
			ldp->ld_entry = NULL;
			ldp->ld_next = p->p_lwpfree;
			p->p_lwpfree = ldp;
			kmem_free(lep, sizeof (*lep));
			break;
		}
	}
	mutex_exit(&thp->th_lock);
}

/*
 * Look up an lwp in the lwpid hash table by lwpid.
 */
lwpdir_t *
lwp_hash_lookup(proc_t *p, id_t lwpid)
{
	tidhash_t *thp;
	lwpdir_t *ldp;

	/*
	 * The process may be exiting, after p_tidhash has been set to NULL in
	 * proc_exit() but before prfree() has been called.  Return failure in
	 * this case.
	 */
	if (p->p_tidhash == NULL)
		return (NULL);

	thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid)
			return (ldp);
	}

	return (NULL);
}

/*
 * Same as lwp_hash_lookup(), but acquire and return
 * the tid hash table entry lock on success.
 */
lwpdir_t *
lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	tidhash_t *tidhash;
	uint_t tidhash_sz;
	tidhash_t *thp;
	lwpdir_t *ldp;

top:
	tidhash_sz = p->p_tidhash_sz;
	membar_consumer();
	if ((tidhash = p->p_tidhash) == NULL)
		return (NULL);

	thp = &tidhash[TIDHASH(lwpid, tidhash_sz)];
	mutex_enter(&thp->th_lock);

	/*
	 * Since we are not holding p->p_lock, the tid hash table
	 * may have changed.  If so, start over.  If not, then
	 * it cannot change until after we drop &thp->th_lock.
	 */
	if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) {
		mutex_exit(&thp->th_lock);
		goto top;
	}

	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid) {
			*mpp = &thp->th_lock;
			return (ldp);
		}
	}

	mutex_exit(&thp->th_lock);
	return (NULL);
}
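
/*
 * A sketch of the consumer side (cf. lwp_unpark()), assuming the
 * caller drops the bucket lock returned through mpp when done:
 *
 *	kmutex_t *mp;
 *	lwpdir_t *ldp;
 *
 *	if ((ldp = lwp_hash_lookup_and_lock(p, lwpid, &mp)) != NULL) {
 *		... use ldp->ld_entry, safe from hash table growth ...
 *		mutex_exit(mp);
 *	}
 */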

/*
 * Update the indicated LWP usage statistic for the current LWP.
 */
void
lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc)
{
	klwp_t *lwp = ttolwp(curthread);

	if (lwp == NULL)
		return;

	switch (lwp_stat_id) {
	case LWP_STAT_INBLK:
		lwp->lwp_ru.inblock += inc;
		break;
	case LWP_STAT_OUBLK:
		lwp->lwp_ru.oublock += inc;
		break;
	case LWP_STAT_MSGRCV:
		lwp->lwp_ru.msgrcv += inc;
		break;
	case LWP_STAT_MSGSND:
		lwp->lwp_ru.msgsnd += inc;
		break;
	default:
		panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id);
	}
}