/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc.  The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

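/*
 * Editorial note (not original Sun commentary): because HASHTBLSZ is a
 * power of two, TIDHASH() reduces to a cheap bit-mask.  Once the full
 * table is allocated by finish_init() (hash_mask == 1023), a tid of
 * 2050 hashes to bucket 2050 & 1023 == 2.  Until then hash_size is 1,
 * hash_mask is 0, and every tid maps to the single initial bucket.
 */
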
extern const Lc_interface rtld_funcs[];

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int thread_queue_fifo = 4;
int thread_queue_dump = 0;
int thread_cond_wait_defer = 0;
int thread_error_detection = 0;
int thread_async_safe = 0;
int thread_stack_cache = 10;

int thread_door_noreserve = 0;

static ulwp_t *ulwp_alloc(void);
static void ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}

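/*
 * Editorial note (not original Sun commentary): hash_in_unlocked() is
 * a LIFO push onto the bucket's singly-linked chain.  After inserting
 * T2 into a bucket already holding T1, the chain reads:
 *
 *	hash_bucket -> T2 -> T1 -> NULL
 *
 * hash_out_unlocked(), below, walks the ul_hash links to unlink.
 */
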
/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}

static void
ulwp_clean(ulwp_t *ulwp)
{
	ulwp->ul_self = NULL;
	ulwp->ul_rval = NULL;
	ulwp->ul_lwpid = 0;
	ulwp->ul_pri = 0;
	ulwp->ul_mappedpri = 0;
	ulwp->ul_policy = 0;
	ulwp->ul_pri_mapped = 0;
	ulwp->ul_mutator = 0;
	ulwp->ul_pleasestop = 0;
	ulwp->ul_stop = 0;
	ulwp->ul_dead = 0;
	ulwp->ul_unwind = 0;
	ulwp->ul_detached = 0;
	ulwp->ul_stopping = 0;
	ulwp->ul_sp = 0;
	ulwp->ul_critical = 0;
	ulwp->ul_cancelable = 0;
	ulwp->ul_preempt = 0;
	ulwp->ul_sigsuspend = 0;
	ulwp->ul_cancel_pending = 0;
	ulwp->ul_cancel_disabled = 0;
	ulwp->ul_cancel_async = 0;
	ulwp->ul_save_async = 0;
	ulwp->ul_cursig = 0;
	ulwp->ul_created = 0;
	ulwp->ul_replace = 0;
	ulwp->ul_schedctl_called = NULL;
	ulwp->ul_errno = 0;
	ulwp->ul_errnop = NULL;
	ulwp->ul_clnup_hdr = NULL;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_bindflags = 0;
	(void) _private_memset(&ulwp->ul_td_evbuf, 0,
	    sizeof (ulwp->ul_td_evbuf));
	ulwp->ul_td_events_enable = 0;
	ulwp->ul_qtype = 0;
	ulwp->ul_usropts = 0;
	ulwp->ul_startpc = NULL;
	ulwp->ul_startarg = NULL;
	ulwp->ul_wchan = NULL;
	ulwp->ul_link = NULL;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_mxchain = NULL;
	ulwp->ul_epri = 0;
	ulwp->ul_emappedpri = 0;
	/* PROBE_SUPPORT begin */
	ulwp->ul_tpdp = NULL;
	/* PROBE_SUPPORT end */
	ulwp->ul_siglink = NULL;
	(void) _private_memset(ulwp->ul_ftsd, 0,
	    sizeof (void *) * TSD_NFAST);
	ulwp->ul_stsd = NULL;
	(void) _private_memset(&ulwp->ul_spinlock, 0,
	    sizeof (ulwp->ul_spinlock));
	ulwp->ul_spin_lock_spin = 0;
	ulwp->ul_spin_lock_spin2 = 0;
	ulwp->ul_spin_lock_sleep = 0;
	ulwp->ul_spin_lock_wakeup = 0;
	ulwp->ul_ex_unwind = NULL;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    __lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}

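/*
 * Editorial note (not original Sun commentary): the probe above is the
 * lwp analogue of the classic kill(pid, 0) liveness test; signal 0
 * performs validation only and delivers nothing.  A minimal sketch of
 * the idiom, assuming some lwpid in hand:
 *
 *	if (__lwp_kill(lwpid, 0) == ESRCH)
 *		;	... the lwp has fully completed lwp_exit() ...
 */
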
/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) _private_munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

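	/*
	 * Editorial note (not original Sun commentary): the mapping size
	 * searched for above is fully determined by the earlier rounding.
	 * For example, with an 8K pagesize, a request of stksize 10000 and
	 * guardsize 1 rounds to stksize 16384 and guardsize 0 (mmap's own
	 * red-zone page absorbs the guard), so mapsize is 16384 and only
	 * cached stacks of exactly that geometry can be reused.
	 */
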
	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = _private_mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack.  Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) _private_munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			ulwp->ul_ix = -1;
			if (guardsize)	/* protect the extra red zone */
				(void) _private_mprotect(stk,
				    guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}

/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}

/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

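/*
 * Editorial note (not original Sun commentary): the lmalloc() in
 * ulwp_alloc() places the static TLS images immediately below the
 * ulwp_t that is returned:
 *
 *	data                     data + tls_size
 *	|<------ tls_size ------>|<--- sizeof (ulwp_t) --->|
 *	[   static TLS images    ][         ulwp_t         ]
 *
 * libc_init(), later in this file, lays out the main thread's
 * ulwp_t the same way.
 */
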
/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast_internal(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}

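/*
 * Editorial note (not original Sun commentary): find_lwp() returns
 * with the target's hash lock held, so every caller must pair it with
 * ulwp_unlock().  A minimal sketch of the canonical pattern (it is
 * exactly what _thr_getprio(), later in this file, does):
 *
 *	if ((ulwp = find_lwp(tid)) == NULL)
 *		error = ESRCH;
 *	else {
 *		... inspect or modify *ulwp ...
 *		ulwp_unlock(ulwp, udp);
 *	}
 */
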
int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread, pri_t priority, int policy,
	size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error = 0;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if (((stk || stksize) && stksize < MINSTACK) ||
	    priority < THREAD_MIN_PRIORITY || priority > THREAD_MAX_PRIORITY)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
		ulwp->ul_ix = -1;
	}
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in _mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_release_spin = self->ul_release_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called.  _thr_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	if (setup_context(&uc, _thr_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
		error = EAGAIN;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	ulwp->ul_nocancel = 0;
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;
	ulwp->ul_policy = policy;
	ulwp->ul_pri = priority;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

#pragma weak thr_create = _thr_create
int
_thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread,
	    curthread->ul_pri, curthread->ul_policy, 0));
}

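/*
 * Editorial note (not original Sun commentary): thr_create() is the
 * public entry point; the extra _thrp_create() arguments default to
 * the creating thread's priority and policy.  A minimal usage sketch,
 * where start_func and arg are hypothetical application names:
 *
 *	thread_t tid;
 *	int err = thr_create(NULL, 0, start_func, arg, THR_DETACHED, &tid);
 *
 * A NULL stack address with a zero stksize selects a cached or newly
 * mmap()ed default-size stack via find_stack() above.
 */
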
/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * _thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process.  We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) _private_memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure.  Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}

void
_thr_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled.  Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask.  We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior.  Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) _thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag.  It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thr_exit_common(): _thrp_unwind() returned");
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thr_terminate(void *status)
{
	_thr_exit_common(status, 0);
}

#pragma weak thr_exit = _thr_exit
#pragma weak pthread_exit = _thr_exit
#pragma weak _pthread_exit = _thr_exit
void
_thr_exit(void *status)
{
	_thr_exit_common(status, 1);
}

int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about.  It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

#pragma weak thr_join = _thr_join
int
_thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}

/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak pthread_join = _pthread_join
int
_pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}

#pragma weak pthread_detach = _thr_detach
#pragma weak _pthread_detach = _thr_detach
int
_thr_detach(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

/*
 * Static local string compare function to avoid calling strncmp()
 * (and hence the dynamic linker) during library initialization.
 */
static int
sncmp(const char *s1, const char *s2, size_t n)
{
	n++;
	while (--n != 0 && *s1 == *s2++)
		if (*s1++ == '\0')
			return (0);
	return (n == 0 ? 0 : *(uchar_t *)s1 - *(uchar_t *)--s2);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0) {
		thread_adaptive_spin = value;
		thread_release_spin = (value + 1) / 2;
	}
	if ((value = envvar(ev, "RELEASE_SPIN", 1000000)) >= 0)
		thread_release_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 100)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
#endif
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
}

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && sncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && sncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}

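/*
 * Editorial note (not original Sun commentary): each variable is
 * parsed by envvar() as a bounded decimal value.  For example, with
 * this environment:
 *
 *	_THREAD_STACK_CACHE=40
 *	LIBTHREAD_QUEUE_SPIN=2000000
 *
 * thread_stack_cache becomes 40 and thread_queue_spin is clamped to
 * its limit of 1000000.  A non-numeric value makes envvar() return -1
 * and the variable is ignored.
 */
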
/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __amd64id(void);
#endif

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD assembler strfoo() and memfoo() functions.
	 */
	__amd64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map.  We switch to the primary link map
	 * and stay there once we see it.  All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext_syscall(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop =
	    (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) _private_getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* __lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc.  We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) _private_memcpy(udp,
		    oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = _private_getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory.  When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata.  This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) _private_setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_max_spinners = (uchar_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_release_spin = thread_release_spin;
	self->ul_queue_spin = thread_queue_spin;

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime, so that
	 * __threaded can be bound to a __threaded outside libc that may
	 * not have the initial value of 1 (without a copy relocation
	 * in a.out).
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread().  We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * First allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = _private_mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(grab_assert_lock);
	(void) _atexit(collect_queue_statistics);
}

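/*
 * Editorial note (not original Sun commentary): since atexit()
 * handlers run in reverse order of registration, the three handlers
 * registered above execute at process exit as:
 *
 *	collect_queue_statistics()	(registered last, runs first)
 *	grab_assert_lock()
 *	dump_queue_statistics()		(registered first, runs last)
 */
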
/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	mutex_t *mp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = __lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/* no one in the child is on a sleep queue; reinitialize */
	if (udp->queue_head) {
		(void) _private_memset(udp->queue_head, 0,
		    2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; i++) {
			mp = &udp->queue_head[i].qh_lock;
			mp->mutex_flag = LOCK_INITED;
			mp->mutex_magic = MUTEX_MAGIC;
		}
	}

	/*
	 * All lwps except ourself are gone.  Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 */
	postfork1_child_tpool();
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
}

#pragma weak thr_setprio = _thr_setprio
#pragma weak pthread_setschedprio = _thr_setprio
#pragma weak _pthread_setschedprio = _thr_setprio
int
_thr_setprio(thread_t tid, int priority)
{
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = priority;
	return (_thread_setschedparam_main(tid, 0, &param, PRIO_SET_PRIO));
}

#pragma weak thr_getprio = _thr_getprio
int
_thr_getprio(thread_t tid, int *priority)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else {
		*priority = ulwp->ul_pri;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = _thr_self
#pragma weak thr_self = _thr_self
#pragma weak pthread_self = _thr_self
#pragma weak _pthread_self = _thr_self
thread_t
_thr_self()
{
	return (curthread->ul_lwpid);
}

#pragma weak thr_main = _thr_main
int
_thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}

int
_thrp_cancelled(void)
{
	return (curthread->ul_rval == PTHREAD_CANCELED);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak thr_stksegment = _thr_stksegment
int
_thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}

void
force_continue(ulwp_t *ulwp)
{
#if defined(THREAD_DEBUG)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
#endif
	int error;
	timespec_t ts;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));

	for (;;) {
		error = __lwp_continue(ulwp->ul_lwpid);
		if (error != 0 && error != EINTR)
			break;
		error = 0;
		if (ulwp->ul_stopping) {	/* he is stopping himself */
			ts.tv_sec = 0;		/* give him a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) __nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* he is running now */
			break;			/* so we are done */
		/*
		 * He is marked as being in the process of stopping
		 * himself.  Loop around and continue him again.
		 * He may not have been stopped the first time.
		 */
	}
}

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe
 * point, that is, to a point where ul_critical is zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
		 */
		ulwp->ul_pleasestop |= whystopped;
		force_continue(ulwp);
		if (link_dropped != NULL) {
			*link_dropped = 1;
			lmutex_unlock(&udp->link_lock);
			/* be sure to drop link_lock only once */
			link_dropped = NULL;
		}

		/*
		 * The thread may disappear by calling thr_exit() so we
		 * cannot rely on the ulwp pointer after dropping the lock.
		 * Instead, we search the hash table to find it again.
		 * When we return, we may find that the thread has been
		 * continued by some other thread.  The suspend/continue
		 * interfaces are prone to such race conditions by design.
		 */
		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
		    (ulwp->ul_pleasestop & whystopped)) {
			(void) _cond_wait(cvp, mp);
			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
			    ulwp != NULL; ulwp = ulwp->ul_hash) {
				if (ulwp->ul_lwpid == tid)
					break;
			}
		}

		if (ulwp == NULL || ulwp->ul_dead)
			error = ESRCH;
		else {
			/*
			 * Do another lwp_suspend() to make sure we don't
			 * return until the target thread is fully stopped
			 * in the kernel.  Don't apply lwp_suspend() until
			 * we know that the target is not holding any
			 * queue locks, that is, that it has completed
			 * ulwp_unlock(self) and has, or at least is
			 * about to, call lwp_suspend() on itself.  We do
			 * this by grabbing the target's spin lock.
			 */
			ASSERT(ulwp->ul_lwpid == tid);
			spin_lock_set(&ulwp->ul_spinlock);
			(void) ___lwp_suspend(tid);
			spin_lock_clear(&ulwp->ul_spinlock);
			/*
			 * If some other thread did a thr_continue()
			 * on the target thread we have to start over.
			 */
			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
				goto top;
		}
	}

	(void) cond_broadcast_internal(cvp);
	lmutex_unlock(mp);
	return (error);
}

int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while
	 * some other thread is performing a fork.
	 * This also allows only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while
	 * some other thread is performing a fork.
	 * This also allows only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
		 */
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it, in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead ? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}
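/*
 * Sketch of the intended calling sequence around forkall() (not real
 * code: the actual call site lives elsewhere in libc, and
 * __forkall_syscall() is a stand-in name for the raw system call):
 *
 *	fork_lock_enter();
 *	suspend_fork();			// stop all other lwps
 *	pid = __forkall_syscall();	// both processes return here
 *	continue_fork(pid == 0);	// restart lwps; nonzero argument
 *					// fixes up schedctl in the child
 *	fork_lock_exit();
 */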
int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

#pragma weak thr_suspend = _thr_suspend
int
_thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

#pragma weak thr_continue = _thr_continue
int
_thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

#pragma weak thr_yield = _thr_yield
void
_thr_yield()
{
	lwp_yield();
}

#pragma weak thr_kill = _thr_kill
#pragma weak pthread_kill = _thr_kill
#pragma weak _pthread_kill = _thr_kill
int
_thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (__lwp_kill(tid, sig));
}
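/*
 * Illustrative sketch (not part of the library): typical application
 * use of the suspend/continue and thr_kill() interfaces.  Creating
 * the thread THR_SUSPENDED and releasing it with thr_continue()
 * avoids the races inherent in suspending an already-running thread.
 * worker() and init_worker_state() are hypothetical.
 *
 *	#include <thread.h>
 *	#include <signal.h>
 *
 *	extern void *worker(void *);
 *	extern void init_worker_state(void);
 *
 *	int
 *	start_worker(thread_t *tidp)
 *	{
 *		int error;
 *
 *		error = thr_create(NULL, 0, worker, NULL,
 *		    THR_SUSPENDED, tidp);
 *		if (error != 0)
 *			return (error);
 *		init_worker_state();		// before the worker runs
 *		(void) thr_continue(*tidp);	// let it run
 *		(void) thr_kill(*tidp, SIGUSR1);	// e.g., poke it
 *		return (0);
 *	}
 */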
2215 */ 2216 self->ul_cursig = 0; 2217 take_deferred_signal(sig); 2218 ASSERT(self->ul_cursig == 0); 2219 } 2220 } 2221 ASSERT(self->ul_critical == 0); 2222 } 2223 2224 int 2225 _ti_bind_guard(int bindflag) 2226 { 2227 ulwp_t *self = curthread; 2228 2229 if ((self->ul_bindflags & bindflag) == bindflag) 2230 return (0); 2231 enter_critical(self); 2232 self->ul_bindflags |= bindflag; 2233 return (1); 2234 } 2235 2236 int 2237 _ti_bind_clear(int bindflag) 2238 { 2239 ulwp_t *self = curthread; 2240 2241 if ((self->ul_bindflags & bindflag) == 0) 2242 return (self->ul_bindflags); 2243 self->ul_bindflags &= ~bindflag; 2244 exit_critical(self); 2245 return (self->ul_bindflags); 2246 } 2247 2248 /* 2249 * sigoff() and sigon() enable cond_wait() to behave (optionally) like 2250 * it does in the old libthread (see the comments in cond_wait_queue()). 2251 * Also, signals are deferred at thread startup until TLS constructors 2252 * have all been called, at which time _thr_setup() calls sigon(). 2253 * 2254 * _sigoff() and _sigon() are external consolidation-private interfaces to 2255 * sigoff() and sigon(), respectively, in libc. These are used in libnsl. 2256 * Also, _sigoff() and _sigon() are called from dbx's run-time checking 2257 * (librtc.so) to defer signals during its critical sections (not to be 2258 * confused with libc critical sections [see exit_critical() above]). 2259 */ 2260 void 2261 _sigoff(void) 2262 { 2263 sigoff(curthread); 2264 } 2265 2266 void 2267 _sigon(void) 2268 { 2269 sigon(curthread); 2270 } 2271 2272 void 2273 sigon(ulwp_t *self) 2274 { 2275 int sig; 2276 2277 ASSERT(self->ul_sigdefer > 0); 2278 if (--self->ul_sigdefer == 0) { 2279 if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) { 2280 self->ul_cursig = 0; 2281 take_deferred_signal(sig); 2282 ASSERT(self->ul_cursig == 0); 2283 } 2284 } 2285 } 2286 2287 #pragma weak thr_getconcurrency = _thr_getconcurrency 2288 int 2289 _thr_getconcurrency() 2290 { 2291 return (thr_concurrency); 2292 } 2293 2294 #pragma weak pthread_getconcurrency = _pthread_getconcurrency 2295 int 2296 _pthread_getconcurrency() 2297 { 2298 return (pthread_concurrency); 2299 } 2300 2301 #pragma weak thr_setconcurrency = _thr_setconcurrency 2302 int 2303 _thr_setconcurrency(int new_level) 2304 { 2305 uberdata_t *udp = curthread->ul_uberdata; 2306 2307 if (new_level < 0) 2308 return (EINVAL); 2309 if (new_level > 65536) /* 65536 is totally arbitrary */ 2310 return (EAGAIN); 2311 lmutex_lock(&udp->link_lock); 2312 if (new_level > thr_concurrency) 2313 thr_concurrency = new_level; 2314 lmutex_unlock(&udp->link_lock); 2315 return (0); 2316 } 2317 2318 #pragma weak pthread_setconcurrency = _pthread_setconcurrency 2319 int 2320 _pthread_setconcurrency(int new_level) 2321 { 2322 if (new_level < 0) 2323 return (EINVAL); 2324 if (new_level > 65536) /* 65536 is totally arbitrary */ 2325 return (EAGAIN); 2326 pthread_concurrency = new_level; 2327 return (0); 2328 } 2329 2330 #pragma weak thr_min_stack = _thr_min_stack 2331 #pragma weak __pthread_min_stack = _thr_min_stack 2332 size_t 2333 _thr_min_stack(void) 2334 { 2335 return (MINSTACK); 2336 } 2337 2338 int 2339 __nthreads(void) 2340 { 2341 return (curthread->ul_uberdata->nthreads); 2342 } 2343 2344 /* 2345 * XXX 2346 * The remainder of this file implements the private interfaces to java for 2347 * garbage collection. It is no longer used, at least by java 1.2. 2348 * It can all go away once all old JVMs have disappeared. 
2349 */ 2350 2351 int suspendingallmutators; /* when non-zero, suspending all mutators. */ 2352 int suspendedallmutators; /* when non-zero, all mutators suspended. */ 2353 int mutatorsbarrier; /* when non-zero, mutators barrier imposed. */ 2354 mutex_t mutatorslock = DEFAULTMUTEX; /* used to enforce mutators barrier. */ 2355 cond_t mutatorscv = DEFAULTCV; /* where non-mutators sleep. */ 2356 2357 /* 2358 * Get the available register state for the target thread. 2359 * Return non-volatile registers: TRS_NONVOLATILE 2360 */ 2361 #pragma weak thr_getstate = _thr_getstate 2362 int 2363 _thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs) 2364 { 2365 ulwp_t *self = curthread; 2366 uberdata_t *udp = self->ul_uberdata; 2367 ulwp_t **ulwpp; 2368 ulwp_t *ulwp; 2369 int error = 0; 2370 int trs_flag = TRS_LWPID; 2371 2372 if (tid == 0 || self->ul_lwpid == tid) { 2373 ulwp = self; 2374 ulwp_lock(ulwp, udp); 2375 } else if ((ulwpp = find_lwpp(tid)) != NULL) { 2376 ulwp = *ulwpp; 2377 } else { 2378 if (flag) 2379 *flag = TRS_INVALID; 2380 return (ESRCH); 2381 } 2382 2383 if (ulwp->ul_dead) { 2384 trs_flag = TRS_INVALID; 2385 } else if (!ulwp->ul_stop && !suspendedallmutators) { 2386 error = EINVAL; 2387 trs_flag = TRS_INVALID; 2388 } else if (ulwp->ul_stop) { 2389 trs_flag = TRS_NONVOLATILE; 2390 getgregs(ulwp, rs); 2391 } 2392 2393 if (flag) 2394 *flag = trs_flag; 2395 if (lwp) 2396 *lwp = tid; 2397 if (ss != NULL) 2398 (void) _thrp_stksegment(ulwp, ss); 2399 2400 ulwp_unlock(ulwp, udp); 2401 return (error); 2402 } 2403 2404 /* 2405 * Set the appropriate register state for the target thread. 2406 * This is not used by java. It exists solely for the MSTC test suite. 2407 */ 2408 #pragma weak thr_setstate = _thr_setstate 2409 int 2410 _thr_setstate(thread_t tid, int flag, gregset_t rs) 2411 { 2412 uberdata_t *udp = curthread->ul_uberdata; 2413 ulwp_t *ulwp; 2414 int error = 0; 2415 2416 if ((ulwp = find_lwp(tid)) == NULL) 2417 return (ESRCH); 2418 2419 if (!ulwp->ul_stop && !suspendedallmutators) 2420 error = EINVAL; 2421 else if (rs != NULL) { 2422 switch (flag) { 2423 case TRS_NONVOLATILE: 2424 /* do /proc stuff here? */ 2425 if (ulwp->ul_stop) 2426 setgregs(ulwp, rs); 2427 else 2428 error = EINVAL; 2429 break; 2430 case TRS_LWPID: /* do /proc stuff here? 
int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = _open(buf, O_RDONLY, 0)) >= 0) {
		while (_pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) _close(fd);
				return (0);
			}
			lwp_yield();	/* give him a chance to stop */
		}
		(void) _close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = _open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (_writev(fd, iov, 3) >= 0) {
			(void) _close(fd);
			return (0);
		}
		(void) _close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak thr_sighndlrinfo = _thr_sighndlrinfo
void
_thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
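/*
 * Illustrative sketch (not part of the library): how a stack walker
 * can use thr_sighndlrinfo().  A pc within the reported range lies
 * in __sighndlr(), so a ucontext_t was passed down at that frame.
 * The prototype is private, so it is declared by hand here.
 *
 *	#include <thread.h>
 *	#include <inttypes.h>
 *
 *	extern void thr_sighndlrinfo(void (**)(), int *);
 *
 *	int
 *	pc_in_signal_trampoline(uintptr_t pc)
 *	{
 *		void (*func)();
 *		int size;
 *
 *		thr_sighndlrinfo(&func, &size);
 *		return (pc >= (uintptr_t)func &&
 *		    pc < (uintptr_t)func + size);
 *	}
 */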
2550 */ 2551 #pragma weak thr_setmutator = _thr_setmutator 2552 int 2553 _thr_setmutator(thread_t tid, int enabled) 2554 { 2555 ulwp_t *self = curthread; 2556 uberdata_t *udp = self->ul_uberdata; 2557 ulwp_t *ulwp; 2558 int error; 2559 2560 enabled = enabled?1:0; 2561 top: 2562 if (tid == 0) { 2563 ulwp = self; 2564 ulwp_lock(ulwp, udp); 2565 } else if ((ulwp = find_lwp(tid)) == NULL) { 2566 return (ESRCH); 2567 } 2568 2569 /* 2570 * The target thread should be the caller itself or a suspended thread. 2571 * This prevents the target from also changing its ul_mutator field. 2572 */ 2573 error = 0; 2574 if (ulwp != self && !ulwp->ul_stop && enabled) 2575 error = EINVAL; 2576 else if (ulwp->ul_mutator != enabled) { 2577 lmutex_lock(&mutatorslock); 2578 if (mutatorsbarrier) { 2579 ulwp_unlock(ulwp, udp); 2580 while (mutatorsbarrier) 2581 (void) _cond_wait(&mutatorscv, &mutatorslock); 2582 lmutex_unlock(&mutatorslock); 2583 goto top; 2584 } 2585 ulwp->ul_mutator = enabled; 2586 lmutex_unlock(&mutatorslock); 2587 } 2588 2589 ulwp_unlock(ulwp, udp); 2590 return (error); 2591 } 2592 2593 /* 2594 * Establish a barrier against new mutators. Any non-mutator trying 2595 * to become a mutator is suspended until the barrier is removed. 2596 */ 2597 #pragma weak thr_mutators_barrier = _thr_mutators_barrier 2598 void 2599 _thr_mutators_barrier(int enabled) 2600 { 2601 int oldvalue; 2602 2603 lmutex_lock(&mutatorslock); 2604 2605 /* 2606 * Wait if trying to set the barrier while it is already set. 2607 */ 2608 while (mutatorsbarrier && enabled) 2609 (void) _cond_wait(&mutatorscv, &mutatorslock); 2610 2611 oldvalue = mutatorsbarrier; 2612 mutatorsbarrier = enabled; 2613 /* 2614 * Wakeup any blocked non-mutators when barrier is removed. 2615 */ 2616 if (oldvalue && !enabled) 2617 (void) cond_broadcast_internal(&mutatorscv); 2618 lmutex_unlock(&mutatorslock); 2619 } 2620 2621 /* 2622 * Suspend the set of all mutators except for the caller. The list 2623 * of actively running threads is searched and only the mutators 2624 * in this list are suspended. Actively running non-mutators remain 2625 * running. Any other thread is suspended. 2626 */ 2627 #pragma weak thr_suspend_allmutators = _thr_suspend_allmutators 2628 int 2629 _thr_suspend_allmutators(void) 2630 { 2631 ulwp_t *self = curthread; 2632 uberdata_t *udp = self->ul_uberdata; 2633 ulwp_t *ulwp; 2634 int link_dropped; 2635 2636 /* 2637 * We single-thread the entire thread suspend/continue mechanism. 2638 */ 2639 fork_lock_enter(); 2640 2641 top: 2642 lmutex_lock(&udp->link_lock); 2643 2644 if (suspendingallmutators || suspendedallmutators) { 2645 lmutex_unlock(&udp->link_lock); 2646 fork_lock_exit(); 2647 return (EINVAL); 2648 } 2649 suspendingallmutators = 1; 2650 2651 for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) { 2652 ulwp_lock(ulwp, udp); 2653 if (!ulwp->ul_mutator) { 2654 ulwp_unlock(ulwp, udp); 2655 } else if (ulwp->ul_stop) { /* already stopped */ 2656 ulwp->ul_stop |= TSTP_MUTATOR; 2657 ulwp_broadcast(ulwp); 2658 ulwp_unlock(ulwp, udp); 2659 } else { 2660 /* 2661 * Move the stopped lwp out of a critical section. 2662 */ 2663 if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) || 2664 link_dropped) { 2665 suspendingallmutators = 0; 2666 goto top; 2667 } 2668 } 2669 } 2670 2671 suspendedallmutators = 1; 2672 suspendingallmutators = 0; 2673 lmutex_unlock(&udp->link_lock); 2674 fork_lock_exit(); 2675 return (0); 2676 } 2677 2678 /* 2679 * Suspend the target mutator. The caller is permitted to suspend 2680 * itself. 
/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak thr_suspend_mutator = _thr_suspend_mutator
int
_thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak thr_continue_allmutators = _thr_continue_allmutators
int
_thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Resume a suspended mutator.
 */
#pragma weak thr_continue_mutator = _thr_continue_mutator
int
_thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak thr_wait_mutator = _thr_wait_mutator
int
_thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

top:
	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) _cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */