1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/types.h> 33 #include <sys/sysmacros.h> 34 #include <sys/systm.h> 35 #include <sys/thread.h> 36 #include <sys/proc.h> 37 #include <sys/task.h> 38 #include <sys/project.h> 39 #include <sys/signal.h> 40 #include <sys/errno.h> 41 #include <sys/vmparam.h> 42 #include <sys/stack.h> 43 #include <sys/procfs.h> 44 #include <sys/prsystm.h> 45 #include <sys/cpuvar.h> 46 #include <sys/kmem.h> 47 #include <sys/vtrace.h> 48 #include <sys/door.h> 49 #include <vm/seg_kp.h> 50 #include <sys/debug.h> 51 #include <sys/schedctl.h> 52 #include <sys/poll.h> 53 #include <sys/copyops.h> 54 #include <sys/lwp_upimutex_impl.h> 55 #include <sys/cpupart.h> 56 #include <sys/lgrp.h> 57 #include <sys/rctl.h> 58 #include <sys/contract_impl.h> 59 #include <sys/cpc_impl.h> 60 #include <sys/sdt.h> 61 #include <sys/cmn_err.h> 62 #include <sys/brand.h> 63 #include <sys/cyclic.h> 64 #include <sys/pool.h> 65 66 /* hash function for the lwpid hash table, p->p_tidhash[] */ 67 #define TIDHASH(tid, hash_sz) ((tid) & ((hash_sz) - 1)) 68 69 void *segkp_lwp; /* cookie for pool of segkp resources */ 70 extern void reapq_move_lq_to_tq(kthread_t *); 71 extern void freectx_ctx(struct ctxop *); 72 73 /* 74 * Create a kernel thread associated with a particular system process. Give 75 * it an LWP so that microstate accounting will be available for it. 76 */ 77 kthread_t * 78 lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri) 79 { 80 klwp_t *lwp; 81 82 VERIFY((p->p_flag & SSYS) != 0); 83 84 lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0); 85 86 VERIFY(lwp != NULL); 87 88 return (lwptot(lwp)); 89 } 90 91 /* 92 * Create a thread that appears to be stopped at sys_rtt. 93 */ 94 klwp_t * 95 lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p, 96 int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid) 97 { 98 klwp_t *lwp = NULL; 99 kthread_t *t; 100 kthread_t *tx; 101 cpupart_t *oldpart = NULL; 102 size_t stksize; 103 caddr_t lwpdata = NULL; 104 processorid_t binding; 105 int err = 0; 106 kproject_t *oldkpj, *newkpj; 107 void *bufp = NULL; 108 klwp_t *curlwp; 109 lwpent_t *lep; 110 lwpdir_t *old_dir = NULL; 111 uint_t old_dirsz = 0; 112 tidhash_t *old_hash = NULL; 113 uint_t old_hashsz = 0; 114 ret_tidhash_t *ret_tidhash = NULL; 115 int i; 116 int rctlfail = 0; 117 boolean_t branded = 0; 118 struct ctxop *ctx = NULL; 119 120 ASSERT(cid != sysdccid); /* system threads must start in SYS */ 121 122 ASSERT(p != &p0); /* No new LWPs in p0. */ 123 124 mutex_enter(&p->p_lock); 125 mutex_enter(&p->p_zone->zone_nlwps_lock); 126 /* 127 * don't enforce rctl limits on system processes 128 */ 129 if (!CLASS_KERNEL(cid)) { 130 if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl) 131 if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p, 132 1, 0) & RCT_DENY) 133 rctlfail = 1; 134 if (p->p_task->tk_proj->kpj_nlwps >= 135 p->p_task->tk_proj->kpj_nlwps_ctl) 136 if (rctl_test(rc_project_nlwps, 137 p->p_task->tk_proj->kpj_rctls, p, 1, 0) 138 & RCT_DENY) 139 rctlfail = 1; 140 if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl) 141 if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p, 142 1, 0) & RCT_DENY) 143 rctlfail = 1; 144 } 145 if (rctlfail) { 146 mutex_exit(&p->p_zone->zone_nlwps_lock); 147 mutex_exit(&p->p_lock); 148 atomic_inc_32(&p->p_zone->zone_ffcap); 149 return (NULL); 150 } 151 p->p_task->tk_nlwps++; 152 p->p_task->tk_proj->kpj_nlwps++; 153 p->p_zone->zone_nlwps++; 154 mutex_exit(&p->p_zone->zone_nlwps_lock); 155 mutex_exit(&p->p_lock); 156 157 curlwp = ttolwp(curthread); 158 if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0) 159 stksize = lwp_default_stksize; 160 161 if (CLASS_KERNEL(cid)) { 162 /* 163 * Since we are creating an LWP in an SSYS process, we do not 164 * inherit anything from the current thread's LWP. We set 165 * stksize and lwpdata to 0 in order to let thread_create() 166 * allocate a regular kernel thread stack for this thread. 167 */ 168 curlwp = NULL; 169 stksize = 0; 170 lwpdata = NULL; 171 172 } else if (stksize == lwp_default_stksize) { 173 /* 174 * Try to reuse an <lwp,stack> from the LWP deathrow. 175 */ 176 if (lwp_reapcnt > 0) { 177 mutex_enter(&reaplock); 178 if ((t = lwp_deathrow) != NULL) { 179 ASSERT(t->t_swap); 180 lwp_deathrow = t->t_forw; 181 lwp_reapcnt--; 182 lwpdata = t->t_swap; 183 lwp = t->t_lwp; 184 ctx = t->t_ctx; 185 t->t_swap = NULL; 186 t->t_lwp = NULL; 187 t->t_ctx = NULL; 188 reapq_move_lq_to_tq(t); 189 } 190 mutex_exit(&reaplock); 191 if (lwp != NULL) { 192 lwp_stk_fini(lwp); 193 } 194 if (ctx != NULL) { 195 freectx_ctx(ctx); 196 } 197 } 198 if (lwpdata == NULL && 199 (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) { 200 mutex_enter(&p->p_lock); 201 mutex_enter(&p->p_zone->zone_nlwps_lock); 202 p->p_task->tk_nlwps--; 203 p->p_task->tk_proj->kpj_nlwps--; 204 p->p_zone->zone_nlwps--; 205 mutex_exit(&p->p_zone->zone_nlwps_lock); 206 mutex_exit(&p->p_lock); 207 atomic_inc_32(&p->p_zone->zone_ffnomem); 208 return (NULL); 209 } 210 } else { 211 stksize = roundup(stksize, PAGESIZE); 212 if ((lwpdata = (caddr_t)segkp_get(segkp, stksize, 213 (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) { 214 mutex_enter(&p->p_lock); 215 mutex_enter(&p->p_zone->zone_nlwps_lock); 216 p->p_task->tk_nlwps--; 217 p->p_task->tk_proj->kpj_nlwps--; 218 p->p_zone->zone_nlwps--; 219 mutex_exit(&p->p_zone->zone_nlwps_lock); 220 mutex_exit(&p->p_lock); 221 atomic_inc_32(&p->p_zone->zone_ffnomem); 222 return (NULL); 223 } 224 } 225 226 /* 227 * Create a thread, initializing the stack pointer 228 */ 229 t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri); 230 231 /* 232 * If a non-NULL stack base is passed in, thread_create() assumes 233 * that the stack might be statically allocated (as opposed to being 234 * allocated from segkp), and so it does not set t_swap. Since 235 * the lwpdata was allocated from segkp, we must set t_swap to point 236 * to it ourselves. 237 * 238 * This would be less confusing if t_swap had a better name; it really 239 * indicates that the stack is allocated from segkp, regardless of 240 * whether or not it is swappable. 241 */ 242 if (lwpdata != NULL) { 243 ASSERT(!CLASS_KERNEL(cid)); 244 ASSERT(t->t_swap == NULL); 245 t->t_swap = lwpdata; /* Start of page-able data */ 246 } 247 248 /* 249 * If the stack and lwp can be reused, mark the thread as such. 250 * When we get to reapq_add() from resume_from_zombie(), these 251 * threads will go onto lwp_deathrow instead of thread_deathrow. 252 */ 253 if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize) 254 t->t_flag |= T_LWPREUSE; 255 256 if (lwp == NULL) 257 lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP); 258 bzero(lwp, sizeof (*lwp)); 259 t->t_lwp = lwp; 260 261 t->t_hold = *smask; 262 lwp->lwp_thread = t; 263 lwp->lwp_procp = p; 264 lwp->lwp_sigaltstack.ss_flags = SS_DISABLE; 265 if (curlwp != NULL && curlwp->lwp_childstksz != 0) 266 lwp->lwp_childstksz = curlwp->lwp_childstksz; 267 268 t->t_stk = lwp_stk_init(lwp, t->t_stk); 269 thread_load(t, proc, arg, len); 270 271 /* 272 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect. 273 */ 274 if (p->p_rprof_cyclic != CYCLIC_NONE) 275 t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP); 276 277 if (cid != NOCLASS) 278 (void) CL_ALLOC(&bufp, cid, KM_SLEEP); 279 280 /* 281 * Allocate an lwp directory entry for the new lwp. 282 */ 283 lep = kmem_zalloc(sizeof (*lep), KM_SLEEP); 284 285 mutex_enter(&p->p_lock); 286 grow: 287 /* 288 * Grow the lwp (thread) directory and lwpid hash table if necessary. 289 * A note on the growth algorithm: 290 * The new lwp directory size is computed as: 291 * new = 2 * old + 2 292 * Starting with an initial size of 2 (see exec_common()), 293 * this yields numbers that are a power of two minus 2: 294 * 2, 6, 14, 30, 62, 126, 254, 510, 1022, ... 295 * The size of the lwpid hash table must be a power of two 296 * and must be commensurate in size with the lwp directory 297 * so that hash bucket chains remain short. Therefore, 298 * the lwpid hash table size is computed as: 299 * hashsz = (dirsz + 2) / 2 300 * which leads to these hash table sizes corresponding to 301 * the above directory sizes: 302 * 2, 4, 8, 16, 32, 64, 128, 256, 512, ... 303 * A note on growing the hash table: 304 * For performance reasons, code in lwp_unpark() does not 305 * acquire curproc->p_lock when searching the hash table. 306 * Rather, it calls lwp_hash_lookup_and_lock() which 307 * acquires only the individual hash bucket lock, taking 308 * care to deal with reallocation of the hash table 309 * during the time it takes to acquire the lock. 310 * 311 * This is sufficient to protect the integrity of the 312 * hash table, but it requires us to acquire all of the 313 * old hash bucket locks before growing the hash table 314 * and to release them afterwards. It also requires us 315 * not to free the old hash table because some thread 316 * in lwp_hash_lookup_and_lock() might still be trying 317 * to acquire the old bucket lock. 318 * 319 * So we adopt the tactic of keeping all of the retired 320 * hash tables on a linked list, so they can be safely 321 * freed when the process exits or execs. 322 * 323 * Because the hash table grows in powers of two, the 324 * total size of all of the hash tables will be slightly 325 * less than twice the size of the largest hash table. 326 */ 327 while (p->p_lwpfree == NULL) { 328 uint_t dirsz = p->p_lwpdir_sz; 329 lwpdir_t *new_dir; 330 uint_t new_dirsz; 331 lwpdir_t *ldp; 332 tidhash_t *new_hash; 333 uint_t new_hashsz; 334 335 mutex_exit(&p->p_lock); 336 337 /* 338 * Prepare to remember the old p_tidhash for later 339 * kmem_free()ing when the process exits or execs. 340 */ 341 if (ret_tidhash == NULL) 342 ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t), 343 KM_SLEEP); 344 if (old_dir != NULL) 345 kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 346 if (old_hash != NULL) 347 kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 348 349 new_dirsz = 2 * dirsz + 2; 350 new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP); 351 for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++) 352 ldp->ld_next = ldp + 1; 353 new_hashsz = (new_dirsz + 2) / 2; 354 new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t), 355 KM_SLEEP); 356 357 mutex_enter(&p->p_lock); 358 if (p == curproc) 359 prbarrier(p); 360 361 if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) { 362 /* 363 * Someone else beat us to it or some lwp exited. 364 * Set up to free our memory and take a lap. 365 */ 366 old_dir = new_dir; 367 old_dirsz = new_dirsz; 368 old_hash = new_hash; 369 old_hashsz = new_hashsz; 370 } else { 371 /* 372 * For the benefit of lwp_hash_lookup_and_lock(), 373 * called from lwp_unpark(), which searches the 374 * tid hash table without acquiring p->p_lock, 375 * we must acquire all of the tid hash table 376 * locks before replacing p->p_tidhash. 377 */ 378 old_hash = p->p_tidhash; 379 old_hashsz = p->p_tidhash_sz; 380 for (i = 0; i < old_hashsz; i++) { 381 mutex_enter(&old_hash[i].th_lock); 382 mutex_enter(&new_hash[i].th_lock); 383 } 384 385 /* 386 * We simply hash in all of the old directory entries. 387 * This works because the old directory has no empty 388 * slots and the new hash table starts out empty. 389 * This reproduces the original directory ordering 390 * (required for /proc directory semantics). 391 */ 392 old_dir = p->p_lwpdir; 393 old_dirsz = p->p_lwpdir_sz; 394 p->p_lwpdir = new_dir; 395 p->p_lwpfree = new_dir; 396 p->p_lwpdir_sz = new_dirsz; 397 for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++) 398 lwp_hash_in(p, ldp->ld_entry, 399 new_hash, new_hashsz, 0); 400 401 /* 402 * Remember the old hash table along with all 403 * of the previously-remembered hash tables. 404 * We will free them at process exit or exec. 405 */ 406 ret_tidhash->rth_tidhash = old_hash; 407 ret_tidhash->rth_tidhash_sz = old_hashsz; 408 ret_tidhash->rth_next = p->p_ret_tidhash; 409 p->p_ret_tidhash = ret_tidhash; 410 411 /* 412 * Now establish the new tid hash table. 413 * As soon as we assign p->p_tidhash, 414 * code in lwp_unpark() can start using it. 415 */ 416 membar_producer(); 417 p->p_tidhash = new_hash; 418 419 /* 420 * It is necessary that p_tidhash reach global 421 * visibility before p_tidhash_sz. Otherwise, 422 * code in lwp_hash_lookup_and_lock() could 423 * index into the old p_tidhash using the new 424 * p_tidhash_sz and thereby access invalid data. 425 */ 426 membar_producer(); 427 p->p_tidhash_sz = new_hashsz; 428 429 /* 430 * Release the locks; allow lwp_unpark() to carry on. 431 */ 432 for (i = 0; i < old_hashsz; i++) { 433 mutex_exit(&old_hash[i].th_lock); 434 mutex_exit(&new_hash[i].th_lock); 435 } 436 437 /* 438 * Avoid freeing these objects below. 439 */ 440 ret_tidhash = NULL; 441 old_hash = NULL; 442 old_hashsz = 0; 443 } 444 } 445 446 /* 447 * Block the process against /proc while we manipulate p->p_tlist, 448 * unless lwp_create() was called by /proc for the PCAGENT operation. 449 * We want to do this early enough so that we don't drop p->p_lock 450 * until the thread is put on the p->p_tlist. 451 */ 452 if (p == curproc) { 453 prbarrier(p); 454 /* 455 * If the current lwp has been requested to stop, do so now. 456 * Otherwise we have a race condition between /proc attempting 457 * to stop the process and this thread creating a new lwp 458 * that was not seen when the /proc PCSTOP request was issued. 459 * We rely on stop() to call prbarrier(p) before returning. 460 */ 461 while ((curthread->t_proc_flag & TP_PRSTOP) && 462 !ttolwp(curthread)->lwp_nostop) { 463 /* 464 * We called pool_barrier_enter() before calling 465 * here to lwp_create(). We have to call 466 * pool_barrier_exit() before stopping. 467 */ 468 pool_barrier_exit(); 469 prbarrier(p); 470 stop(PR_REQUESTED, 0); 471 /* 472 * And we have to repeat the call to 473 * pool_barrier_enter after stopping. 474 */ 475 pool_barrier_enter(); 476 prbarrier(p); 477 } 478 479 /* 480 * If process is exiting, there could be a race between 481 * the agent lwp creation and the new lwp currently being 482 * created. So to prevent this race lwp creation is failed 483 * if the process is exiting. 484 */ 485 if (p->p_flag & (SEXITLWPS|SKILLED)) { 486 err = 1; 487 goto error; 488 } 489 490 /* 491 * Since we might have dropped p->p_lock, the 492 * lwp directory free list might have changed. 493 */ 494 if (p->p_lwpfree == NULL) 495 goto grow; 496 } 497 498 kpreempt_disable(); /* can't grab cpu_lock here */ 499 500 /* 501 * Inherit processor and processor set bindings from curthread. 502 * 503 * For kernel LWPs, we do not inherit processor set bindings at 504 * process creation time (i.e. when p != curproc). After the 505 * kernel process is created, any subsequent LWPs must be created 506 * by threads in the kernel process, at which point we *will* 507 * inherit processor set bindings. 508 */ 509 if (CLASS_KERNEL(cid) && p != curproc) { 510 t->t_bind_cpu = binding = PBIND_NONE; 511 t->t_cpupart = oldpart = &cp_default; 512 t->t_bind_pset = PS_NONE; 513 t->t_bindflag = (uchar_t)default_binding_mode; 514 } else { 515 binding = curthread->t_bind_cpu; 516 t->t_bind_cpu = binding; 517 oldpart = t->t_cpupart; 518 t->t_cpupart = curthread->t_cpupart; 519 t->t_bind_pset = curthread->t_bind_pset; 520 t->t_bindflag = curthread->t_bindflag | 521 (uchar_t)default_binding_mode; 522 } 523 524 /* 525 * thread_create() initializes this thread's home lgroup to the root. 526 * Choose a more suitable lgroup, since this thread is associated 527 * with an lwp. 528 */ 529 ASSERT(oldpart != NULL); 530 if (binding != PBIND_NONE && t->t_affinitycnt == 0) { 531 t->t_bound_cpu = cpu[binding]; 532 if (t->t_lpl != t->t_bound_cpu->cpu_lpl) 533 lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1); 534 } else if (CLASS_KERNEL(cid)) { 535 /* 536 * Kernel threads are always in the root lgrp. 537 */ 538 lgrp_move_thread(t, 539 &t->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1); 540 } else { 541 lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1); 542 } 543 544 kpreempt_enable(); 545 546 /* 547 * make sure lpl points to our own partition 548 */ 549 ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads); 550 ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads + 551 t->t_cpupart->cp_nlgrploads); 552 553 /* 554 * It is safe to point the thread to the new project without holding it 555 * since we're holding the target process' p_lock here and therefore 556 * we're guaranteed that it will not move to another project. 557 */ 558 newkpj = p->p_task->tk_proj; 559 oldkpj = ttoproj(t); 560 if (newkpj != oldkpj) { 561 t->t_proj = newkpj; 562 (void) project_hold(newkpj); 563 project_rele(oldkpj); 564 } 565 566 if (cid != NOCLASS) { 567 /* 568 * If the lwp is being created in the current process 569 * and matches the current thread's scheduling class, 570 * we should propagate the current thread's scheduling 571 * parameters by calling CL_FORK. Otherwise just use 572 * the defaults by calling CL_ENTERCLASS. 573 */ 574 if (p != curproc || curthread->t_cid != cid) { 575 err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp); 576 t->t_pri = pri; /* CL_ENTERCLASS may have changed it */ 577 /* 578 * We don't call schedctl_set_cidpri(t) here 579 * because the schedctl data is not yet set 580 * up for the newly-created lwp. 581 */ 582 } else { 583 t->t_clfuncs = &(sclass[cid].cl_funcs->thread); 584 err = CL_FORK(curthread, t, bufp); 585 t->t_cid = cid; 586 } 587 if (err) { 588 atomic_inc_32(&p->p_zone->zone_ffmisc); 589 goto error; 590 } else { 591 bufp = NULL; 592 } 593 } 594 595 /* 596 * If we were given an lwpid then use it, else allocate one. 597 */ 598 if (lwpid != 0) 599 t->t_tid = lwpid; 600 else { 601 /* 602 * lwp/thread id 0 is never valid; reserved for special checks. 603 * lwp/thread id 1 is reserved for the main thread. 604 * Start again at 2 when INT_MAX has been reached 605 * (id_t is a signed 32-bit integer). 606 */ 607 id_t prev_id = p->p_lwpid; /* last allocated tid */ 608 609 do { /* avoid lwpid duplication */ 610 if (p->p_lwpid == INT_MAX) { 611 p->p_flag |= SLWPWRAP; 612 p->p_lwpid = 1; 613 } 614 if ((t->t_tid = ++p->p_lwpid) == prev_id) { 615 /* 616 * All lwpids are allocated; fail the request. 617 */ 618 err = 1; 619 atomic_inc_32(&p->p_zone->zone_ffnoproc); 620 goto error; 621 } 622 /* 623 * We only need to worry about colliding with an id 624 * that's already in use if this process has 625 * cycled through all available lwp ids. 626 */ 627 if ((p->p_flag & SLWPWRAP) == 0) 628 break; 629 } while (lwp_hash_lookup(p, t->t_tid) != NULL); 630 } 631 632 /* 633 * If this is a branded process, let the brand do any necessary lwp 634 * initialization. 635 */ 636 if (PROC_IS_BRANDED(p)) { 637 if (BROP(p)->b_initlwp(lwp)) { 638 err = 1; 639 atomic_inc_32(&p->p_zone->zone_ffmisc); 640 goto error; 641 } 642 branded = 1; 643 } 644 645 if (t->t_tid == 1) { 646 kpreempt_disable(); 647 ASSERT(t->t_lpl != NULL); 648 p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid; 649 kpreempt_enable(); 650 if (p->p_tr_lgrpid != LGRP_NONE && 651 p->p_tr_lgrpid != p->p_t1_lgrpid) { 652 lgrp_update_trthr_migrations(1); 653 } 654 } 655 656 p->p_lwpcnt++; 657 t->t_waitfor = -1; 658 659 /* 660 * Turn microstate accounting on for thread if on for process. 661 */ 662 if (p->p_flag & SMSACCT) 663 t->t_proc_flag |= TP_MSACCT; 664 665 /* 666 * If the process has watchpoints, mark the new thread as such. 667 */ 668 if (pr_watch_active(p)) 669 watch_enable(t); 670 671 /* 672 * The lwp is being created in the stopped state. 673 * We set all the necessary flags to indicate that fact here. 674 * We omit the TS_CREATE flag from t_schedflag so that the lwp 675 * cannot be set running until the caller is finished with it, 676 * even if lwp_continue() is called on it after we drop p->p_lock. 677 * When the caller is finished with the newly-created lwp, 678 * the caller must call lwp_create_done() to allow the lwp 679 * to be set running. If the TP_HOLDLWP is left set, the 680 * lwp will suspend itself after reaching system call exit. 681 */ 682 init_mstate(t, LMS_STOPPED); 683 t->t_proc_flag |= TP_HOLDLWP; 684 t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE)); 685 t->t_whystop = PR_SUSPENDED; 686 t->t_whatstop = SUSPEND_NORMAL; 687 t->t_sig_check = 1; /* ensure that TP_HOLDLWP is honored */ 688 689 /* 690 * Set system call processing flags in case tracing or profiling 691 * is set. The first system call will evaluate these and turn 692 * them off if they aren't needed. 693 */ 694 t->t_pre_sys = 1; 695 t->t_post_sys = 1; 696 697 /* 698 * Insert the new thread into the list of all threads. 699 */ 700 if ((tx = p->p_tlist) == NULL) { 701 t->t_back = t; 702 t->t_forw = t; 703 p->p_tlist = t; 704 } else { 705 t->t_forw = tx; 706 t->t_back = tx->t_back; 707 tx->t_back->t_forw = t; 708 tx->t_back = t; 709 } 710 711 /* 712 * Insert the new lwp into an lwp directory slot position 713 * and into the lwpid hash table. 714 */ 715 lep->le_thread = t; 716 lep->le_lwpid = t->t_tid; 717 lep->le_start = t->t_start; 718 lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1); 719 720 lwp_fp_init(lwp); 721 722 if (state == TS_RUN) { 723 /* 724 * We set the new lwp running immediately. 725 */ 726 t->t_proc_flag &= ~TP_HOLDLWP; 727 lwp_create_done(t); 728 } 729 730 error: 731 if (err) { 732 if (CLASS_KERNEL(cid)) { 733 /* 734 * This should only happen if a system process runs 735 * out of lwpids, which shouldn't occur. 736 */ 737 panic("Failed to create a system LWP"); 738 } 739 /* 740 * We have failed to create an lwp, so decrement the number 741 * of lwps in the task and let the lgroup load averages know 742 * that this thread isn't going to show up. 743 */ 744 kpreempt_disable(); 745 lgrp_move_thread(t, NULL, 1); 746 kpreempt_enable(); 747 748 ASSERT(MUTEX_HELD(&p->p_lock)); 749 mutex_enter(&p->p_zone->zone_nlwps_lock); 750 p->p_task->tk_nlwps--; 751 p->p_task->tk_proj->kpj_nlwps--; 752 p->p_zone->zone_nlwps--; 753 mutex_exit(&p->p_zone->zone_nlwps_lock); 754 if (cid != NOCLASS && bufp != NULL) 755 CL_FREE(cid, bufp); 756 757 if (branded) 758 BROP(p)->b_freelwp(lwp); 759 760 mutex_exit(&p->p_lock); 761 t->t_state = TS_FREE; 762 thread_rele(t); 763 764 /* 765 * We need to remove t from the list of all threads 766 * because thread_exit()/lwp_exit() isn't called on t. 767 */ 768 mutex_enter(&pidlock); 769 ASSERT(t != t->t_next); /* t0 never exits */ 770 t->t_next->t_prev = t->t_prev; 771 t->t_prev->t_next = t->t_next; 772 mutex_exit(&pidlock); 773 774 thread_free(t); 775 kmem_free(lep, sizeof (*lep)); 776 lwp = NULL; 777 } else { 778 mutex_exit(&p->p_lock); 779 } 780 781 if (old_dir != NULL) 782 kmem_free(old_dir, old_dirsz * sizeof (*old_dir)); 783 if (old_hash != NULL) 784 kmem_free(old_hash, old_hashsz * sizeof (*old_hash)); 785 if (ret_tidhash != NULL) 786 kmem_free(ret_tidhash, sizeof (ret_tidhash_t)); 787 788 DTRACE_PROC1(lwp__create, kthread_t *, t); 789 return (lwp); 790 } 791 792 /* 793 * lwp_create_done() is called by the caller of lwp_create() to set the 794 * newly-created lwp running after the caller has finished manipulating it. 795 */ 796 void 797 lwp_create_done(kthread_t *t) 798 { 799 proc_t *p = ttoproc(t); 800 801 ASSERT(MUTEX_HELD(&p->p_lock)); 802 803 /* 804 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked(). 805 * (The absence of the TS_CREATE flag prevents the lwp from running 806 * until we are finished with it, even if lwp_continue() is called on 807 * it by some other lwp in the process or elsewhere in the kernel.) 808 */ 809 thread_lock(t); 810 ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE)); 811 /* 812 * If TS_CSTART is set, lwp_continue(t) has been called and 813 * has already incremented p_lwprcnt; avoid doing this twice. 814 */ 815 if (!(t->t_schedflag & TS_CSTART)) 816 p->p_lwprcnt++; 817 t->t_schedflag |= (TS_CSTART | TS_CREATE); 818 setrun_locked(t); 819 thread_unlock(t); 820 } 821 822 /* 823 * Copy an LWP's active templates, and clear the latest contracts. 824 */ 825 void 826 lwp_ctmpl_copy(klwp_t *dst, klwp_t *src) 827 { 828 int i; 829 830 for (i = 0; i < ct_ntypes; i++) { 831 dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]); 832 dst->lwp_ct_latest[i] = NULL; 833 } 834 } 835 836 /* 837 * Clear an LWP's contract template state. 838 */ 839 void 840 lwp_ctmpl_clear(klwp_t *lwp) 841 { 842 ct_template_t *tmpl; 843 int i; 844 845 for (i = 0; i < ct_ntypes; i++) { 846 if ((tmpl = lwp->lwp_ct_active[i]) != NULL) { 847 ctmpl_free(tmpl); 848 lwp->lwp_ct_active[i] = NULL; 849 } 850 851 if (lwp->lwp_ct_latest[i] != NULL) { 852 contract_rele(lwp->lwp_ct_latest[i]); 853 lwp->lwp_ct_latest[i] = NULL; 854 } 855 } 856 } 857 858 /* 859 * Individual lwp exit. 860 * If this is the last lwp, exit the whole process. 861 */ 862 void 863 lwp_exit(void) 864 { 865 kthread_t *t = curthread; 866 klwp_t *lwp = ttolwp(t); 867 proc_t *p = ttoproc(t); 868 869 ASSERT(MUTEX_HELD(&p->p_lock)); 870 871 mutex_exit(&p->p_lock); 872 873 #if defined(__sparc) 874 /* 875 * Ensure that the user stack is fully abandoned.. 876 */ 877 trash_user_windows(); 878 #endif 879 880 tsd_exit(); /* free thread specific data */ 881 882 kcpc_passivate(); /* Clean up performance counter state */ 883 884 pollcleanup(); 885 886 if (t->t_door) 887 door_slam(); 888 889 if (t->t_schedctl != NULL) 890 schedctl_lwp_cleanup(t); 891 892 if (t->t_upimutex != NULL) 893 upimutex_cleanup(); 894 895 /* 896 * Perform any brand specific exit processing, then release any 897 * brand data associated with the lwp 898 */ 899 if (PROC_IS_BRANDED(p)) 900 BROP(p)->b_lwpexit(lwp); 901 902 lwp_pcb_exit(); 903 904 mutex_enter(&p->p_lock); 905 lwp_cleanup(); 906 907 /* 908 * When this process is dumping core, its lwps are held here 909 * until the core dump is finished. Then exitlwps() is called 910 * again to release these lwps so that they can finish exiting. 911 */ 912 if (p->p_flag & SCOREDUMP) 913 stop(PR_SUSPENDED, SUSPEND_NORMAL); 914 915 /* 916 * Block the process against /proc now that we have really acquired 917 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least). 918 */ 919 prbarrier(p); 920 921 /* 922 * Call proc_exit() if this is the last non-daemon lwp in the process. 923 */ 924 if (!(t->t_proc_flag & TP_DAEMON) && 925 p->p_lwpcnt == p->p_lwpdaemon + 1) { 926 mutex_exit(&p->p_lock); 927 if (proc_exit(CLD_EXITED, 0) == 0) { 928 /* Restarting init. */ 929 return; 930 } 931 932 /* 933 * proc_exit() returns a non-zero value when some other 934 * lwp got there first. We just have to continue in 935 * lwp_exit(). 936 */ 937 mutex_enter(&p->p_lock); 938 ASSERT(curproc->p_flag & SEXITLWPS); 939 prbarrier(p); 940 } 941 942 DTRACE_PROC(lwp__exit); 943 944 /* 945 * If the lwp is a detached lwp or if the process is exiting, 946 * remove (lwp_hash_out()) the lwp from the lwp directory. 947 * Otherwise null out the lwp's le_thread pointer in the lwp 948 * directory so that other threads will see it as a zombie lwp. 949 */ 950 prlwpexit(t); /* notify /proc */ 951 if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS)) 952 lwp_hash_out(p, t->t_tid); 953 else { 954 ASSERT(!(t->t_proc_flag & TP_DAEMON)); 955 p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL; 956 p->p_zombcnt++; 957 cv_broadcast(&p->p_lwpexit); 958 } 959 if (t->t_proc_flag & TP_DAEMON) { 960 p->p_lwpdaemon--; 961 t->t_proc_flag &= ~TP_DAEMON; 962 } 963 t->t_proc_flag &= ~TP_TWAIT; 964 965 /* 966 * Maintain accurate lwp count for task.max-lwps resource control. 967 */ 968 mutex_enter(&p->p_zone->zone_nlwps_lock); 969 p->p_task->tk_nlwps--; 970 p->p_task->tk_proj->kpj_nlwps--; 971 p->p_zone->zone_nlwps--; 972 mutex_exit(&p->p_zone->zone_nlwps_lock); 973 974 CL_EXIT(t); /* tell the scheduler that t is exiting */ 975 ASSERT(p->p_lwpcnt != 0); 976 p->p_lwpcnt--; 977 978 /* 979 * If all remaining non-daemon lwps are waiting in lwp_wait(), 980 * wake them up so someone can return EDEADLK. 981 * (See the block comment preceeding lwp_wait().) 982 */ 983 if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait)) 984 cv_broadcast(&p->p_lwpexit); 985 986 t->t_proc_flag |= TP_LWPEXIT; 987 term_mstate(t); 988 989 t->t_forw->t_back = t->t_back; 990 t->t_back->t_forw = t->t_forw; 991 if (t == p->p_tlist) 992 p->p_tlist = t->t_forw; 993 994 /* 995 * Clean up the signal state. 996 */ 997 if (t->t_sigqueue != NULL) 998 sigdelq(p, t, 0); 999 if (lwp->lwp_curinfo != NULL) { 1000 siginfofree(lwp->lwp_curinfo); 1001 lwp->lwp_curinfo = NULL; 1002 } 1003 1004 /* 1005 * If we have spymaster information (that is, if we're an agent LWP), 1006 * free that now. 1007 */ 1008 if (lwp->lwp_spymaster != NULL) { 1009 kmem_free(lwp->lwp_spymaster, sizeof (psinfo_t)); 1010 lwp->lwp_spymaster = NULL; 1011 } 1012 1013 thread_rele(t); 1014 1015 /* 1016 * Terminated lwps are associated with process zero and are put onto 1017 * death-row by resume(). Avoid preemption after resetting t->t_procp. 1018 */ 1019 t->t_preempt++; 1020 1021 if (t->t_ctx != NULL) 1022 exitctx(t); 1023 if (p->p_pctx != NULL) 1024 exitpctx(p); 1025 1026 t->t_procp = &p0; 1027 1028 /* 1029 * Notify the HAT about the change of address space 1030 */ 1031 hat_thread_exit(t); 1032 /* 1033 * When this is the last running lwp in this process and some lwp is 1034 * waiting for this condition to become true, or this thread was being 1035 * suspended, then the waiting lwp is awakened. 1036 * 1037 * Also, if the process is exiting, we may have a thread waiting in 1038 * exitlwps() that needs to be notified. 1039 */ 1040 if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) || 1041 (p->p_flag & SEXITLWPS)) 1042 cv_broadcast(&p->p_holdlwps); 1043 1044 /* 1045 * Need to drop p_lock so we can reacquire pidlock. 1046 */ 1047 mutex_exit(&p->p_lock); 1048 mutex_enter(&pidlock); 1049 1050 ASSERT(t != t->t_next); /* t0 never exits */ 1051 t->t_next->t_prev = t->t_prev; 1052 t->t_prev->t_next = t->t_next; 1053 cv_broadcast(&t->t_joincv); /* wake up anyone in thread_join */ 1054 mutex_exit(&pidlock); 1055 1056 t->t_state = TS_ZOMB; 1057 swtch_from_zombie(); 1058 /* never returns */ 1059 } 1060 1061 1062 /* 1063 * Cleanup function for an exiting lwp. 1064 * Called both from lwp_exit() and from proc_exit(). 1065 * p->p_lock is repeatedly released and grabbed in this function. 1066 */ 1067 void 1068 lwp_cleanup(void) 1069 { 1070 kthread_t *t = curthread; 1071 proc_t *p = ttoproc(t); 1072 1073 ASSERT(MUTEX_HELD(&p->p_lock)); 1074 1075 /* untimeout any lwp-bound realtime timers */ 1076 if (p->p_itimer != NULL) 1077 timer_lwpexit(); 1078 1079 /* 1080 * If this is the /proc agent lwp that is exiting, readjust p_lwpid 1081 * so it appears that the agent never existed, and clear p_agenttp. 1082 */ 1083 if (t == p->p_agenttp) { 1084 ASSERT(t->t_tid == p->p_lwpid); 1085 p->p_lwpid--; 1086 p->p_agenttp = NULL; 1087 } 1088 1089 /* 1090 * Do lgroup bookkeeping to account for thread exiting. 1091 */ 1092 kpreempt_disable(); 1093 lgrp_move_thread(t, NULL, 1); 1094 if (t->t_tid == 1) { 1095 p->p_t1_lgrpid = LGRP_NONE; 1096 } 1097 kpreempt_enable(); 1098 1099 lwp_ctmpl_clear(ttolwp(t)); 1100 } 1101 1102 int 1103 lwp_suspend(kthread_t *t) 1104 { 1105 int tid; 1106 proc_t *p = ttoproc(t); 1107 1108 ASSERT(MUTEX_HELD(&p->p_lock)); 1109 1110 /* 1111 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp(). 1112 * If an lwp is stopping itself, there is no need to wait. 1113 */ 1114 top: 1115 t->t_proc_flag |= TP_HOLDLWP; 1116 if (t == curthread) { 1117 t->t_sig_check = 1; 1118 } else { 1119 /* 1120 * Make sure the lwp stops promptly. 1121 */ 1122 thread_lock(t); 1123 t->t_sig_check = 1; 1124 /* 1125 * XXX Should use virtual stop like /proc does instead of 1126 * XXX waking the thread to get it to stop. 1127 */ 1128 if (ISWAKEABLE(t) || ISWAITING(t)) { 1129 setrun_locked(t); 1130 } else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) { 1131 poke_cpu(t->t_cpu->cpu_id); 1132 } 1133 1134 tid = t->t_tid; /* remember thread ID */ 1135 /* 1136 * Wait for lwp to stop 1137 */ 1138 while (!SUSPENDED(t)) { 1139 /* 1140 * Drop the thread lock before waiting and reacquire it 1141 * afterwards, so the thread can change its t_state 1142 * field. 1143 */ 1144 thread_unlock(t); 1145 1146 /* 1147 * Check if aborted by exitlwps(). 1148 */ 1149 if (p->p_flag & SEXITLWPS) 1150 lwp_exit(); 1151 1152 /* 1153 * Cooperate with jobcontrol signals and /proc stopping 1154 * by calling cv_wait_sig() to wait for the target 1155 * lwp to stop. Just using cv_wait() can lead to 1156 * deadlock because, if some other lwp has stopped 1157 * by either of these mechanisms, then p_lwprcnt will 1158 * never become zero if we do a cv_wait(). 1159 */ 1160 if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock)) 1161 return (EINTR); 1162 1163 /* 1164 * Check to see if thread died while we were 1165 * waiting for it to suspend. 1166 */ 1167 if (idtot(p, tid) == NULL) 1168 return (ESRCH); 1169 1170 thread_lock(t); 1171 /* 1172 * If the TP_HOLDLWP flag went away, lwp_continue() 1173 * or vfork() must have been called while we were 1174 * waiting, so start over again. 1175 */ 1176 if ((t->t_proc_flag & TP_HOLDLWP) == 0) { 1177 thread_unlock(t); 1178 goto top; 1179 } 1180 } 1181 thread_unlock(t); 1182 } 1183 return (0); 1184 } 1185 1186 /* 1187 * continue a lwp that's been stopped by lwp_suspend(). 1188 */ 1189 void 1190 lwp_continue(kthread_t *t) 1191 { 1192 proc_t *p = ttoproc(t); 1193 int was_suspended = t->t_proc_flag & TP_HOLDLWP; 1194 1195 ASSERT(MUTEX_HELD(&p->p_lock)); 1196 1197 t->t_proc_flag &= ~TP_HOLDLWP; 1198 thread_lock(t); 1199 if (SUSPENDED(t) && 1200 !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) { 1201 p->p_lwprcnt++; 1202 t->t_schedflag |= TS_CSTART; 1203 setrun_locked(t); 1204 } 1205 thread_unlock(t); 1206 /* 1207 * Wakeup anyone waiting for this thread to be suspended 1208 */ 1209 if (was_suspended) 1210 cv_broadcast(&p->p_holdlwps); 1211 } 1212 1213 /* 1214 * ******************************** 1215 * Miscellaneous lwp routines * 1216 * ******************************** 1217 */ 1218 /* 1219 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK. 1220 * This will cause the process's lwps to stop at a hold point. A hold 1221 * point is where a kernel thread has a flat stack. This is at the 1222 * return from a system call and at the return from a user level trap. 1223 * 1224 * When a process is undergoing a fork1() or vfork(), its p_flag is set to 1225 * SHOLDFORK1. This will cause the process's lwps to stop at a modified 1226 * hold point. The lwps in the process are not being cloned, so they 1227 * are held at the usual hold points and also within issig_forreal(). 1228 * This has the side-effect that their system calls do not return 1229 * showing EINTR. 1230 * 1231 * An lwp can also be held. This is identified by the TP_HOLDLWP flag on 1232 * the thread. The TP_HOLDLWP flag is set in lwp_suspend(), where the active 1233 * lwp is waiting for the target lwp to be stopped. 1234 */ 1235 void 1236 holdlwp(void) 1237 { 1238 proc_t *p = curproc; 1239 kthread_t *t = curthread; 1240 1241 mutex_enter(&p->p_lock); 1242 /* 1243 * Don't terminate immediately if the process is dumping core. 1244 * Once the process has dumped core, all lwps are terminated. 1245 */ 1246 if (!(p->p_flag & SCOREDUMP)) { 1247 if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP)) 1248 lwp_exit(); 1249 } 1250 if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) { 1251 mutex_exit(&p->p_lock); 1252 return; 1253 } 1254 /* 1255 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps 1256 * when p->p_lwprcnt becomes zero. 1257 */ 1258 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1259 if (p->p_flag & SEXITLWPS) 1260 lwp_exit(); 1261 mutex_exit(&p->p_lock); 1262 } 1263 1264 /* 1265 * Have all lwps within the process hold at a point where they are 1266 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1). 1267 */ 1268 int 1269 holdlwps(int holdflag) 1270 { 1271 proc_t *p = curproc; 1272 1273 ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1); 1274 mutex_enter(&p->p_lock); 1275 schedctl_finish_sigblock(curthread); 1276 again: 1277 while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 1278 /* 1279 * If another lwp is doing a forkall() or proc_exit(), bail out. 1280 */ 1281 if (p->p_flag & (SEXITLWPS | SHOLDFORK)) { 1282 mutex_exit(&p->p_lock); 1283 return (0); 1284 } 1285 /* 1286 * Another lwp is doing a fork1() or is undergoing 1287 * watchpoint activity. We hold here for it to complete. 1288 */ 1289 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1290 } 1291 p->p_flag |= holdflag; 1292 pokelwps(p); 1293 --p->p_lwprcnt; 1294 /* 1295 * Wait for the process to become quiescent (p->p_lwprcnt == 0). 1296 */ 1297 while (p->p_lwprcnt > 0) { 1298 /* 1299 * Check if aborted by exitlwps(). 1300 * Also check if SHOLDWATCH is set; it takes precedence. 1301 */ 1302 if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) { 1303 p->p_lwprcnt++; 1304 p->p_flag &= ~holdflag; 1305 cv_broadcast(&p->p_holdlwps); 1306 goto again; 1307 } 1308 /* 1309 * Cooperate with jobcontrol signals and /proc stopping. 1310 * If some other lwp has stopped by either of these 1311 * mechanisms, then p_lwprcnt will never become zero 1312 * and the process will appear deadlocked unless we 1313 * stop here in sympathy with the other lwp before 1314 * doing the cv_wait() below. 1315 * 1316 * If the other lwp stops after we do the cv_wait(), it 1317 * will wake us up to loop around and do the sympathy stop. 1318 * 1319 * Since stop() drops p->p_lock, we must start from 1320 * the top again on returning from stop(). 1321 */ 1322 if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) { 1323 int whystop = p->p_stopsig? PR_JOBCONTROL : 1324 PR_REQUESTED; 1325 p->p_lwprcnt++; 1326 p->p_flag &= ~holdflag; 1327 stop(whystop, p->p_stopsig); 1328 goto again; 1329 } 1330 cv_wait(&p->p_holdlwps, &p->p_lock); 1331 } 1332 p->p_lwprcnt++; 1333 p->p_flag &= ~holdflag; 1334 mutex_exit(&p->p_lock); 1335 return (1); 1336 } 1337 1338 /* 1339 * See comments for holdwatch(), below. 1340 */ 1341 static int 1342 holdcheck(int clearflags) 1343 { 1344 proc_t *p = curproc; 1345 1346 /* 1347 * If we are trying to exit, that takes precedence over anything else. 1348 */ 1349 if (p->p_flag & SEXITLWPS) { 1350 p->p_lwprcnt++; 1351 p->p_flag &= ~clearflags; 1352 lwp_exit(); 1353 } 1354 1355 /* 1356 * If another thread is calling fork1(), stop the current thread so the 1357 * other can complete. 1358 */ 1359 if (p->p_flag & SHOLDFORK1) { 1360 p->p_lwprcnt++; 1361 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1362 if (p->p_flag & SEXITLWPS) { 1363 p->p_flag &= ~clearflags; 1364 lwp_exit(); 1365 } 1366 return (-1); 1367 } 1368 1369 /* 1370 * If another thread is calling fork(), then indicate we are doing 1371 * watchpoint activity. This will cause holdlwps() above to stop the 1372 * forking thread, at which point we can continue with watchpoint 1373 * activity. 1374 */ 1375 if (p->p_flag & SHOLDFORK) { 1376 p->p_lwprcnt++; 1377 while (p->p_flag & SHOLDFORK) { 1378 p->p_flag |= SHOLDWATCH; 1379 cv_broadcast(&p->p_holdlwps); 1380 cv_wait(&p->p_holdlwps, &p->p_lock); 1381 p->p_flag &= ~SHOLDWATCH; 1382 } 1383 return (-1); 1384 } 1385 1386 return (0); 1387 } 1388 1389 /* 1390 * Stop all lwps within the process, holding themselves in the kernel while the 1391 * active lwp undergoes watchpoint activity. This is more complicated than 1392 * expected because stop() relies on calling holdwatch() in order to copyin data 1393 * from the user's address space. A double barrier is used to prevent an 1394 * infinite loop. 1395 * 1396 * o The first thread into holdwatch() is the 'master' thread and does 1397 * the following: 1398 * 1399 * - Sets SHOLDWATCH on the current process 1400 * - Sets TP_WATCHSTOP on the current thread 1401 * - Waits for all threads to be either stopped or have 1402 * TP_WATCHSTOP set. 1403 * - Sets the SWATCHOK flag on the process 1404 * - Unsets TP_WATCHSTOP 1405 * - Waits for the other threads to completely stop 1406 * - Unsets SWATCHOK 1407 * 1408 * o If SHOLDWATCH is already set when we enter this function, then another 1409 * thread is already trying to stop this thread. This 'slave' thread 1410 * does the following: 1411 * 1412 * - Sets TP_WATCHSTOP on the current thread 1413 * - Waits for SWATCHOK flag to be set 1414 * - Calls stop() 1415 * 1416 * o If SWATCHOK is set on the process, then this function immediately 1417 * returns, as we must have been called via stop(). 1418 * 1419 * In addition, there are other flags that take precedence over SHOLDWATCH: 1420 * 1421 * o If SEXITLWPS is set, exit immediately. 1422 * 1423 * o If SHOLDFORK1 is set, wait for fork1() to complete. 1424 * 1425 * o If SHOLDFORK is set, then watchpoint activity takes precedence In this 1426 * case, set SHOLDWATCH, signalling the forking thread to stop first. 1427 * 1428 * o If the process is being stopped via /proc (TP_PRSTOP is set), then we 1429 * stop the current thread. 1430 * 1431 * Returns 0 if all threads have been quiesced. Returns non-zero if not all 1432 * threads were stopped, or the list of watched pages has changed. 1433 */ 1434 int 1435 holdwatch(void) 1436 { 1437 proc_t *p = curproc; 1438 kthread_t *t = curthread; 1439 int ret = 0; 1440 1441 mutex_enter(&p->p_lock); 1442 1443 p->p_lwprcnt--; 1444 1445 /* 1446 * Check for bail-out conditions as outlined above. 1447 */ 1448 if (holdcheck(0) != 0) { 1449 mutex_exit(&p->p_lock); 1450 return (-1); 1451 } 1452 1453 if (!(p->p_flag & SHOLDWATCH)) { 1454 /* 1455 * We are the master watchpoint thread. Set SHOLDWATCH and poke 1456 * the other threads. 1457 */ 1458 p->p_flag |= SHOLDWATCH; 1459 pokelwps(p); 1460 1461 /* 1462 * Wait for all threads to be stopped or have TP_WATCHSTOP set. 1463 */ 1464 while (pr_allstopped(p, 1) > 0) { 1465 if (holdcheck(SHOLDWATCH) != 0) { 1466 p->p_flag &= ~SHOLDWATCH; 1467 mutex_exit(&p->p_lock); 1468 return (-1); 1469 } 1470 1471 cv_wait(&p->p_holdlwps, &p->p_lock); 1472 } 1473 1474 /* 1475 * All threads are now stopped or in the process of stopping. 1476 * Set SWATCHOK and let them stop completely. 1477 */ 1478 p->p_flag |= SWATCHOK; 1479 t->t_proc_flag &= ~TP_WATCHSTOP; 1480 cv_broadcast(&p->p_holdlwps); 1481 1482 while (pr_allstopped(p, 0) > 0) { 1483 /* 1484 * At first glance, it may appear that we don't need a 1485 * call to holdcheck() here. But if the process gets a 1486 * SIGKILL signal, one of our stopped threads may have 1487 * been awakened and is waiting in exitlwps(), which 1488 * takes precedence over watchpoints. 1489 */ 1490 if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) { 1491 p->p_flag &= ~(SHOLDWATCH | SWATCHOK); 1492 mutex_exit(&p->p_lock); 1493 return (-1); 1494 } 1495 1496 cv_wait(&p->p_holdlwps, &p->p_lock); 1497 } 1498 1499 /* 1500 * All threads are now completely stopped. 1501 */ 1502 p->p_flag &= ~SWATCHOK; 1503 p->p_flag &= ~SHOLDWATCH; 1504 p->p_lwprcnt++; 1505 1506 } else if (!(p->p_flag & SWATCHOK)) { 1507 1508 /* 1509 * SHOLDWATCH is set, so another thread is trying to do 1510 * watchpoint activity. Indicate this thread is stopping, and 1511 * wait for the OK from the master thread. 1512 */ 1513 t->t_proc_flag |= TP_WATCHSTOP; 1514 cv_broadcast(&p->p_holdlwps); 1515 1516 while (!(p->p_flag & SWATCHOK)) { 1517 if (holdcheck(0) != 0) { 1518 t->t_proc_flag &= ~TP_WATCHSTOP; 1519 mutex_exit(&p->p_lock); 1520 return (-1); 1521 } 1522 1523 cv_wait(&p->p_holdlwps, &p->p_lock); 1524 } 1525 1526 /* 1527 * Once the master thread has given the OK, this thread can 1528 * actually call stop(). 1529 */ 1530 t->t_proc_flag &= ~TP_WATCHSTOP; 1531 p->p_lwprcnt++; 1532 1533 stop(PR_SUSPENDED, SUSPEND_NORMAL); 1534 1535 /* 1536 * It's not OK to do watchpoint activity, notify caller to 1537 * retry. 1538 */ 1539 ret = -1; 1540 1541 } else { 1542 1543 /* 1544 * The only way we can hit the case where SHOLDWATCH is set and 1545 * SWATCHOK is set is if we are triggering this from within a 1546 * stop() call. Assert that this is the case. 1547 */ 1548 1549 ASSERT(t->t_proc_flag & TP_STOPPING); 1550 p->p_lwprcnt++; 1551 } 1552 1553 mutex_exit(&p->p_lock); 1554 1555 return (ret); 1556 } 1557 1558 /* 1559 * force all interruptible lwps to trap into the kernel. 1560 */ 1561 void 1562 pokelwps(proc_t *p) 1563 { 1564 kthread_t *t; 1565 1566 ASSERT(MUTEX_HELD(&p->p_lock)); 1567 1568 t = p->p_tlist; 1569 do { 1570 if (t == curthread) 1571 continue; 1572 thread_lock(t); 1573 aston(t); /* make thread trap or do post_syscall */ 1574 if (ISWAKEABLE(t) || ISWAITING(t)) { 1575 setrun_locked(t); 1576 } else if (t->t_state == TS_STOPPED) { 1577 /* 1578 * Ensure that proc_exit() is not blocked by lwps 1579 * that were stopped via jobcontrol or /proc. 1580 */ 1581 if (p->p_flag & SEXITLWPS) { 1582 p->p_stopsig = 0; 1583 t->t_schedflag |= (TS_XSTART | TS_PSTART); 1584 setrun_locked(t); 1585 } 1586 /* 1587 * If we are holding lwps for a forkall(), 1588 * force lwps that have been suspended via 1589 * lwp_suspend() and are suspended inside 1590 * of a system call to proceed to their 1591 * holdlwp() points where they are clonable. 1592 */ 1593 if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) { 1594 if ((t->t_schedflag & TS_CSTART) == 0) { 1595 p->p_lwprcnt++; 1596 t->t_schedflag |= TS_CSTART; 1597 setrun_locked(t); 1598 } 1599 } 1600 } else if (t->t_state == TS_ONPROC) { 1601 if (t->t_cpu != CPU) 1602 poke_cpu(t->t_cpu->cpu_id); 1603 } 1604 thread_unlock(t); 1605 } while ((t = t->t_forw) != p->p_tlist); 1606 } 1607 1608 /* 1609 * undo the effects of holdlwps() or holdwatch(). 1610 */ 1611 void 1612 continuelwps(proc_t *p) 1613 { 1614 kthread_t *t; 1615 1616 /* 1617 * If this flag is set, then the original holdwatch() didn't actually 1618 * stop the process. See comments for holdwatch(). 1619 */ 1620 if (p->p_flag & SWATCHOK) { 1621 ASSERT(curthread->t_proc_flag & TP_STOPPING); 1622 return; 1623 } 1624 1625 ASSERT(MUTEX_HELD(&p->p_lock)); 1626 ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0); 1627 1628 t = p->p_tlist; 1629 do { 1630 thread_lock(t); /* SUSPENDED looks at t_schedflag */ 1631 if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) { 1632 p->p_lwprcnt++; 1633 t->t_schedflag |= TS_CSTART; 1634 setrun_locked(t); 1635 } 1636 thread_unlock(t); 1637 } while ((t = t->t_forw) != p->p_tlist); 1638 } 1639 1640 /* 1641 * Force all other LWPs in the current process other than the caller to exit, 1642 * and then cv_wait() on p_holdlwps for them to exit. The exitlwps() function 1643 * is typically used in these situations: 1644 * 1645 * (a) prior to an exec() system call 1646 * (b) prior to dumping a core file 1647 * (c) prior to a uadmin() shutdown 1648 * 1649 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed. 1650 * Multiple threads in the process can call this function at one time by 1651 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used 1652 * to declare one particular thread the winner who gets to kill the others. 1653 * If a thread wins the exitlwps() dance, zero is returned; otherwise an 1654 * appropriate errno value is returned to caller for its system call to return. 1655 */ 1656 int 1657 exitlwps(int coredump) 1658 { 1659 proc_t *p = curproc; 1660 int heldcnt; 1661 1662 if (curthread->t_door) 1663 door_slam(); 1664 if (p->p_door_list) 1665 door_revoke_all(); 1666 if (curthread->t_schedctl != NULL) 1667 schedctl_lwp_cleanup(curthread); 1668 1669 /* 1670 * Ensure that before starting to wait for other lwps to exit, 1671 * cleanup all upimutexes held by curthread. Otherwise, some other 1672 * lwp could be waiting (uninterruptibly) for a upimutex held by 1673 * curthread, and the call to pokelwps() below would deadlock. 1674 * Even if a blocked upimutex_lock is made interruptible, 1675 * curthread's upimutexes need to be unlocked: do it here. 1676 */ 1677 if (curthread->t_upimutex != NULL) 1678 upimutex_cleanup(); 1679 1680 /* 1681 * Grab p_lock in order to check and set SEXITLWPS to declare a winner. 1682 * We must also block any further /proc access from this point forward. 1683 */ 1684 mutex_enter(&p->p_lock); 1685 prbarrier(p); 1686 1687 if (p->p_flag & SEXITLWPS) { 1688 mutex_exit(&p->p_lock); 1689 aston(curthread); /* force a trip through post_syscall */ 1690 return (set_errno(EINTR)); 1691 } 1692 1693 p->p_flag |= SEXITLWPS; 1694 if (coredump) /* tell other lwps to stop, not exit */ 1695 p->p_flag |= SCOREDUMP; 1696 1697 /* 1698 * Give precedence to exitlwps() if a holdlwps() is 1699 * in progress. The lwp doing the holdlwps() operation 1700 * is aborted when it is awakened. 1701 */ 1702 while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 1703 cv_broadcast(&p->p_holdlwps); 1704 cv_wait(&p->p_holdlwps, &p->p_lock); 1705 prbarrier(p); 1706 } 1707 p->p_flag |= SHOLDFORK; 1708 pokelwps(p); 1709 1710 /* 1711 * Wait for process to become quiescent. 1712 */ 1713 --p->p_lwprcnt; 1714 while (p->p_lwprcnt > 0) { 1715 cv_wait(&p->p_holdlwps, &p->p_lock); 1716 prbarrier(p); 1717 } 1718 p->p_lwprcnt++; 1719 ASSERT(p->p_lwprcnt == 1); 1720 1721 /* 1722 * The SCOREDUMP flag puts the process into a quiescent 1723 * state. The process's lwps remain attached to this 1724 * process until exitlwps() is called again without the 1725 * 'coredump' flag set, then the lwps are terminated 1726 * and the process can exit. 1727 */ 1728 if (coredump) { 1729 p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS); 1730 goto out; 1731 } 1732 1733 /* 1734 * Determine if there are any lwps left dangling in 1735 * the stopped state. This happens when exitlwps() 1736 * aborts a holdlwps() operation. 1737 */ 1738 p->p_flag &= ~SHOLDFORK; 1739 if ((heldcnt = p->p_lwpcnt) > 1) { 1740 kthread_t *t; 1741 for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) { 1742 t->t_proc_flag &= ~TP_TWAIT; 1743 lwp_continue(t); 1744 } 1745 } 1746 1747 /* 1748 * Wait for all other lwps to exit. 1749 */ 1750 --p->p_lwprcnt; 1751 while (p->p_lwpcnt > 1) { 1752 cv_wait(&p->p_holdlwps, &p->p_lock); 1753 prbarrier(p); 1754 } 1755 ++p->p_lwprcnt; 1756 ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1); 1757 1758 p->p_flag &= ~SEXITLWPS; 1759 curthread->t_proc_flag &= ~TP_TWAIT; 1760 1761 out: 1762 if (!coredump && p->p_zombcnt) { /* cleanup the zombie lwps */ 1763 lwpdir_t *ldp; 1764 lwpent_t *lep; 1765 int i; 1766 1767 for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { 1768 lep = ldp->ld_entry; 1769 if (lep != NULL && lep->le_thread != curthread) { 1770 ASSERT(lep->le_thread == NULL); 1771 p->p_zombcnt--; 1772 lwp_hash_out(p, lep->le_lwpid); 1773 } 1774 } 1775 ASSERT(p->p_zombcnt == 0); 1776 } 1777 1778 /* 1779 * If some other LWP in the process wanted us to suspend ourself, 1780 * then we will not do it. The other LWP is now terminated and 1781 * no one will ever continue us again if we suspend ourself. 1782 */ 1783 curthread->t_proc_flag &= ~TP_HOLDLWP; 1784 p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP); 1785 mutex_exit(&p->p_lock); 1786 return (0); 1787 } 1788 1789 /* 1790 * duplicate a lwp. 1791 */ 1792 klwp_t * 1793 forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid) 1794 { 1795 klwp_t *clwp; 1796 void *tregs, *tfpu; 1797 kthread_t *t = lwptot(lwp); 1798 kthread_t *ct; 1799 proc_t *p = lwptoproc(lwp); 1800 int cid; 1801 void *bufp; 1802 void *brand_data; 1803 int val; 1804 1805 ASSERT(p == curproc); 1806 ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0)); 1807 1808 #if defined(__sparc) 1809 if (t == curthread) 1810 (void) flush_user_windows_to_stack(NULL); 1811 #endif 1812 1813 if (t == curthread) 1814 /* copy args out of registers first */ 1815 (void) save_syscall_args(); 1816 1817 clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt, 1818 NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid); 1819 if (clwp == NULL) 1820 return (NULL); 1821 1822 /* 1823 * most of the parent's lwp can be copied to its duplicate, 1824 * except for the fields that are unique to each lwp, like 1825 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap. 1826 */ 1827 ct = clwp->lwp_thread; 1828 tregs = clwp->lwp_regs; 1829 tfpu = clwp->lwp_fpu; 1830 brand_data = clwp->lwp_brand; 1831 1832 /* 1833 * Copy parent lwp to child lwp. Hold child's p_lock to prevent 1834 * mstate_aggr_state() from reading stale mstate entries copied 1835 * from lwp to clwp. 1836 */ 1837 mutex_enter(&cp->p_lock); 1838 *clwp = *lwp; 1839 1840 /* clear microstate and resource usage data in new lwp */ 1841 init_mstate(ct, LMS_STOPPED); 1842 bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru)); 1843 mutex_exit(&cp->p_lock); 1844 1845 /* fix up child's lwp */ 1846 1847 clwp->lwp_pcb.pcb_flags = 0; 1848 #if defined(__sparc) 1849 clwp->lwp_pcb.pcb_step = STEP_NONE; 1850 #endif 1851 clwp->lwp_cursig = 0; 1852 clwp->lwp_extsig = 0; 1853 clwp->lwp_curinfo = (struct sigqueue *)0; 1854 clwp->lwp_thread = ct; 1855 ct->t_sysnum = t->t_sysnum; 1856 clwp->lwp_regs = tregs; 1857 clwp->lwp_fpu = tfpu; 1858 clwp->lwp_brand = brand_data; 1859 clwp->lwp_ap = clwp->lwp_arg; 1860 clwp->lwp_procp = cp; 1861 bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer)); 1862 clwp->lwp_lastfault = 0; 1863 clwp->lwp_lastfaddr = 0; 1864 1865 /* copy parent's struct regs to child. */ 1866 lwp_forkregs(lwp, clwp); 1867 1868 /* 1869 * Fork thread context ops, if any. 1870 */ 1871 if (t->t_ctx) 1872 forkctx(t, ct); 1873 1874 /* fix door state in the child */ 1875 if (t->t_door) 1876 door_fork(t, ct); 1877 1878 /* copy current contract templates, clear latest contracts */ 1879 lwp_ctmpl_copy(clwp, lwp); 1880 1881 mutex_enter(&cp->p_lock); 1882 /* lwp_create() set the TP_HOLDLWP flag */ 1883 if (!(t->t_proc_flag & TP_HOLDLWP)) 1884 ct->t_proc_flag &= ~TP_HOLDLWP; 1885 if (cp->p_flag & SMSACCT) 1886 ct->t_proc_flag |= TP_MSACCT; 1887 mutex_exit(&cp->p_lock); 1888 1889 /* Allow brand to propagate brand-specific state */ 1890 if (PROC_IS_BRANDED(p)) 1891 BROP(p)->b_forklwp(lwp, clwp); 1892 1893 retry: 1894 cid = t->t_cid; 1895 1896 val = CL_ALLOC(&bufp, cid, KM_SLEEP); 1897 ASSERT(val == 0); 1898 1899 mutex_enter(&p->p_lock); 1900 if (cid != t->t_cid) { 1901 /* 1902 * Someone just changed this thread's scheduling class, 1903 * so try pre-allocating the buffer again. Hopefully we 1904 * don't hit this often. 1905 */ 1906 mutex_exit(&p->p_lock); 1907 CL_FREE(cid, bufp); 1908 goto retry; 1909 } 1910 1911 ct->t_unpark = t->t_unpark; 1912 ct->t_clfuncs = t->t_clfuncs; 1913 CL_FORK(t, ct, bufp); 1914 ct->t_cid = t->t_cid; /* after data allocated so prgetpsinfo works */ 1915 mutex_exit(&p->p_lock); 1916 1917 return (clwp); 1918 } 1919 1920 /* 1921 * Add a new lwp entry to the lwp directory and to the lwpid hash table. 1922 */ 1923 void 1924 lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz, 1925 int do_lock) 1926 { 1927 tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)]; 1928 lwpdir_t **ldpp; 1929 lwpdir_t *ldp; 1930 kthread_t *t; 1931 1932 /* 1933 * Allocate a directory element from the free list. 1934 * Code elsewhere guarantees a free slot. 1935 */ 1936 ldp = p->p_lwpfree; 1937 p->p_lwpfree = ldp->ld_next; 1938 ASSERT(ldp->ld_entry == NULL); 1939 ldp->ld_entry = lep; 1940 1941 if (do_lock) 1942 mutex_enter(&thp->th_lock); 1943 1944 /* 1945 * Insert it into the lwpid hash table. 1946 */ 1947 ldpp = &thp->th_list; 1948 ldp->ld_next = *ldpp; 1949 *ldpp = ldp; 1950 1951 /* 1952 * Set the active thread's directory slot entry. 1953 */ 1954 if ((t = lep->le_thread) != NULL) { 1955 ASSERT(lep->le_lwpid == t->t_tid); 1956 t->t_dslot = (int)(ldp - p->p_lwpdir); 1957 } 1958 1959 if (do_lock) 1960 mutex_exit(&thp->th_lock); 1961 } 1962 1963 /* 1964 * Remove an lwp from the lwpid hash table and free its directory entry. 1965 * This is done when a detached lwp exits in lwp_exit() or 1966 * when a non-detached lwp is waited for in lwp_wait() or 1967 * when a zombie lwp is detached in lwp_detach(). 1968 */ 1969 void 1970 lwp_hash_out(proc_t *p, id_t lwpid) 1971 { 1972 tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 1973 lwpdir_t **ldpp; 1974 lwpdir_t *ldp; 1975 lwpent_t *lep; 1976 1977 mutex_enter(&thp->th_lock); 1978 for (ldpp = &thp->th_list; 1979 (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) { 1980 lep = ldp->ld_entry; 1981 if (lep->le_lwpid == lwpid) { 1982 prlwpfree(p, lep); /* /proc deals with le_trace */ 1983 *ldpp = ldp->ld_next; 1984 ldp->ld_entry = NULL; 1985 ldp->ld_next = p->p_lwpfree; 1986 p->p_lwpfree = ldp; 1987 kmem_free(lep, sizeof (*lep)); 1988 break; 1989 } 1990 } 1991 mutex_exit(&thp->th_lock); 1992 } 1993 1994 /* 1995 * Lookup an lwp in the lwpid hash table by lwpid. 1996 */ 1997 lwpdir_t * 1998 lwp_hash_lookup(proc_t *p, id_t lwpid) 1999 { 2000 tidhash_t *thp; 2001 lwpdir_t *ldp; 2002 2003 /* 2004 * The process may be exiting, after p_tidhash has been set to NULL in 2005 * proc_exit() but before prfee() has been called. Return failure in 2006 * this case. 2007 */ 2008 if (p->p_tidhash == NULL) 2009 return (NULL); 2010 2011 thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 2012 for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 2013 if (ldp->ld_entry->le_lwpid == lwpid) 2014 return (ldp); 2015 } 2016 2017 return (NULL); 2018 } 2019 2020 /* 2021 * Same as lwp_hash_lookup(), but acquire and return 2022 * the tid hash table entry lock on success. 2023 */ 2024 lwpdir_t * 2025 lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp) 2026 { 2027 tidhash_t *tidhash; 2028 uint_t tidhash_sz; 2029 tidhash_t *thp; 2030 lwpdir_t *ldp; 2031 2032 top: 2033 tidhash_sz = p->p_tidhash_sz; 2034 membar_consumer(); 2035 if ((tidhash = p->p_tidhash) == NULL) 2036 return (NULL); 2037 2038 thp = &tidhash[TIDHASH(lwpid, tidhash_sz)]; 2039 mutex_enter(&thp->th_lock); 2040 2041 /* 2042 * Since we are not holding p->p_lock, the tid hash table 2043 * may have changed. If so, start over. If not, then 2044 * it cannot change until after we drop &thp->th_lock; 2045 */ 2046 if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) { 2047 mutex_exit(&thp->th_lock); 2048 goto top; 2049 } 2050 2051 for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 2052 if (ldp->ld_entry->le_lwpid == lwpid) { 2053 *mpp = &thp->th_lock; 2054 return (ldp); 2055 } 2056 } 2057 2058 mutex_exit(&thp->th_lock); 2059 return (NULL); 2060 } 2061 2062 /* 2063 * Update the indicated LWP usage statistic for the current LWP. 2064 */ 2065 void 2066 lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc) 2067 { 2068 klwp_t *lwp = ttolwp(curthread); 2069 2070 if (lwp == NULL) 2071 return; 2072 2073 switch (lwp_stat_id) { 2074 case LWP_STAT_INBLK: 2075 lwp->lwp_ru.inblock += inc; 2076 break; 2077 case LWP_STAT_OUBLK: 2078 lwp->lwp_ru.oublock += inc; 2079 break; 2080 case LWP_STAT_MSGRCV: 2081 lwp->lwp_ru.msgrcv += inc; 2082 break; 2083 case LWP_STAT_MSGSND: 2084 lwp->lwp_ru.msgsnd += inc; 2085 break; 2086 default: 2087 panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id); 2088 } 2089 } 2090