/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc.  The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static	int	thr_concurrency = 1;
static	int	pthread_concurrency;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];
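/*
 * Illustrative sketch (not part of the original file): how TIDHASH()
 * selects a bucket.  hash_mask is always hash_size - 1, so the macro
 * is a cheap power-of-two modulus.  Before finish_init() runs,
 * hash_mask is 0 and every tid maps to bucket 0 of init_hash_table;
 * afterwards, with HASHTBLSZ == 1024, tid 2049 maps to bucket
 * 2049 & 1023 == 1.
 */
#if 0	/* example only, never compiled */
static thr_hash_table_t *
example_bucket(thread_t tid, uberdata_t *udp)
{
	/* the same computation hash_in() and find_lwpp() perform */
	return (&udp->thr_hash_table[TIDHASH(tid, udp)]);
}
#endif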
/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int	thread_queue_fifo = 4;
int	thread_queue_dump = 0;
int	thread_cond_wait_defer = 0;
int	thread_error_detection = 0;
int	thread_async_safe = 0;
int	thread_stack_cache = 10;

int	thread_door_noreserve = 0;

static	ulwp_t	*ulwp_alloc(void);
static	void	ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}

/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}
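/*
 * Illustrative sketch (not part of the original file): the pointer-to-
 * pointer walk used by hash_out_unlocked() above.  Keeping a ulwp_t **
 * aimed at the previous link's ul_hash field lets the element be
 * spliced out with a single store and no special case for the head
 * of the bucket.
 */
#if 0	/* example only, never compiled */
static void
example_unlink(ulwp_t **headp, ulwp_t *target)
{
	ulwp_t **ulwpp;

	/* advance until *ulwpp is the target, then splice it out */
	for (ulwpp = headp; *ulwpp != target; ulwpp = &(*ulwpp)->ul_hash)
		continue;
	*ulwpp = target->ul_hash;
	target->ul_hash = NULL;
}
#endif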
static void
ulwp_clean(ulwp_t *ulwp)
{
	ulwp->ul_self = NULL;
	ulwp->ul_rval = NULL;
	ulwp->ul_lwpid = 0;
	ulwp->ul_pri = 0;
	ulwp->ul_mappedpri = 0;
	ulwp->ul_policy = 0;
	ulwp->ul_pri_mapped = 0;
	ulwp->ul_mutator = 0;
	ulwp->ul_pleasestop = 0;
	ulwp->ul_stop = 0;
	ulwp->ul_dead = 0;
	ulwp->ul_unwind = 0;
	ulwp->ul_detached = 0;
	ulwp->ul_stopping = 0;
	ulwp->ul_sp = 0;
	ulwp->ul_critical = 0;
	ulwp->ul_cancelable = 0;
	ulwp->ul_preempt = 0;
	ulwp->ul_sigsuspend = 0;
	ulwp->ul_cancel_pending = 0;
	ulwp->ul_cancel_disabled = 0;
	ulwp->ul_cancel_async = 0;
	ulwp->ul_save_async = 0;
	ulwp->ul_cursig = 0;
	ulwp->ul_created = 0;
	ulwp->ul_replace = 0;
	ulwp->ul_schedctl_called = NULL;
	ulwp->ul_errno = 0;
	ulwp->ul_errnop = NULL;
	ulwp->ul_clnup_hdr = NULL;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_bindflags = 0;
	(void) _private_memset(&ulwp->ul_td_evbuf, 0,
	    sizeof (ulwp->ul_td_evbuf));
	ulwp->ul_td_events_enable = 0;
	ulwp->ul_qtype = 0;
	ulwp->ul_usropts = 0;
	ulwp->ul_startpc = NULL;
	ulwp->ul_startarg = NULL;
	ulwp->ul_wchan = NULL;
	ulwp->ul_link = NULL;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_mxchain = NULL;
	ulwp->ul_epri = 0;
	ulwp->ul_emappedpri = 0;
	/* PROBE_SUPPORT begin */
	ulwp->ul_tpdp = NULL;
	/* PROBE_SUPPORT end */
	ulwp->ul_siglink = NULL;
	(void) _private_memset(ulwp->ul_ftsd, 0,
	    sizeof (void *) * TSD_NFAST);
	ulwp->ul_stsd = NULL;
	(void) _private_memset(&ulwp->ul_spinlock, 0,
	    sizeof (ulwp->ul_spinlock));
	ulwp->ul_spin_lock_spin = 0;
	ulwp->ul_spin_lock_spin2 = 0;
	ulwp->ul_spin_lock_sleep = 0;
	ulwp->ul_spin_lock_wakeup = 0;
	ulwp->ul_ex_unwind = NULL;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    __lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}
/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) _private_munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = _private_mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack.  Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) _private_munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			ulwp->ul_ix = -1;
			if (guardsize)	/* protect the extra red zone */
				(void) _private_mprotect(stk,
				    guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}
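/*
 * Illustrative note (not part of the original file): a worked example
 * of the rounding done in find_stack() above, assuming an 8K page.
 */
#if 0	/* example only, never compiled */
static void
example_rounding(void)
{
	size_t pagesize = 8192;
	size_t stksize = 10000;		/* caller's request */
	size_t guardsize = 1;		/* caller's request */
	size_t mapsize;

	stksize = (stksize + pagesize - 1) & -pagesize;		/* 16384 */
	guardsize = ((guardsize + pagesize - 1) & -pagesize)
	    - pagesize;						/* 0 */
	mapsize = stksize + guardsize;				/* 16384 */
	/* the red-zone page mmap() already provides serves as the guard */
}
#endif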
/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}

/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}
/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast_internal(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}
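/*
 * Illustrative sketch (not part of the original file): the pattern
 * callers of find_lwp() follow.  find_lwpp() and find_lwp() return
 * with the target's hash-bucket lock held on success, so the caller
 * must drop it with ulwp_unlock() when done; _thr_getprio() below is
 * a real instance of this pattern.
 */
#if 0	/* example only, never compiled */
static int
example_is_detached(thread_t tid, uberdata_t *udp)
{
	ulwp_t *ulwp;
	int detached;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (-1);			/* no such thread */
	detached = ulwp->ul_detached;
	ulwp_unlock(ulwp, udp);			/* always release the lock */
	return (detached);
}
#endif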
int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread, pri_t priority, int policy,
	size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error = 0;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if (((stk || stksize) && stksize < MINSTACK) ||
	    priority < THREAD_MIN_PRIORITY || priority > THREAD_MAX_PRIORITY)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
		ulwp->ul_ix = -1;
	}
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in _mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called.  _thr_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	if (setup_context(&uc, _thr_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
		error = EAGAIN;
	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	ulwp->ul_nocancel = 0;
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;
	ulwp->ul_policy = policy;
	ulwp->ul_pri = priority;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

#pragma weak thr_create = _thr_create
int
_thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread,
	    curthread->ul_pri, curthread->ul_policy, 0));
}
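/*
 * Illustrative sketch (not part of the original file): minimal use of
 * the documented thr_create() interface exposed by the weak symbol
 * above.  Passing NULL/0 asks the library to allocate the stack via
 * find_stack(); the start routine "worker" is hypothetical.
 */
#if 0	/* example only, never compiled */
static void *
worker(void *arg)
{
	return (arg);
}

static int
example_spawn(void)
{
	thread_t tid;
	int error;

	/* create suspended, then release it, exercising both paths */
	error = thr_create(NULL, 0, worker, NULL, THR_SUSPENDED, &tid);
	if (error == 0)
		error = thr_continue(tid);
	return (error);
}
#endif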
/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * _thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process.  We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) _private_memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure.  Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}

void
_thr_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled.  Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask.  We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior.  Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) _thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag.  It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thr_exit_common(): _thrp_unwind() returned");
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thr_terminate(void *status)
{
	_thr_exit_common(status, 0);
}

#pragma weak thr_exit = _thr_exit
#pragma weak pthread_exit = _thr_exit
#pragma weak _pthread_exit = _thr_exit
void
_thr_exit(void *status)
{
	_thr_exit_common(status, 1);
}
int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about.  It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

#pragma weak thr_join = _thr_join
int
_thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}

/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak pthread_join = _pthread_join
int
_pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}

#pragma weak pthread_detach = _thr_detach
#pragma weak _pthread_detach = _thr_detach
int
_thr_detach(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}
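/*
 * Illustrative sketch (not part of the original file): the POSIX-level
 * behavior implemented above.  pthread_join() returns only the exit
 * status of a specific thread; thr_join() with tid 0 joins any
 * joinable thread and can report which one departed.
 */
#if 0	/* example only, never compiled */
static int
example_join(thread_t tid)
{
	void *status;
	thread_t departed;
	int error;

	/* POSIX form: status only, specific thread required */
	error = pthread_join(tid, &status);
	if (error != 0)
		return (error);
	/* Solaris form: tid 0 means "any thread"; reports which one */
	return (thr_join(0, &departed, &status));
}
#endif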
/*
 * Static local string compare function to avoid calling strncmp()
 * (and hence the dynamic linker) during library initialization.
 */
static int
sncmp(const char *s1, const char *s2, size_t n)
{
	n++;
	while (--n != 0 && *s1 == *s2++)
		if (*s1++ == '\0')
			return (0);
	return (n == 0 ?
	    0 : *(uchar_t *)s1 - *(uchar_t *)--s2);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
		thread_adaptive_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
#endif
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
}

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && sncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && sncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}
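/*
 * Illustrative note (not part of the original file): examples of the
 * environment settings parsed above.  Values are clamped to the limit
 * passed to envvar(); non-numeric values are ignored because envvar()
 * returns -1 for them.
 *
 *	_THREAD_STACK_CACHE=20		sets thread_stack_cache to 20
 *	_THREAD_QUEUE_SPIN=2000000	clamped to the 1000000 limit
 *	LIBTHREAD_QUEUE_DUMP=1		old-style name, same effect
 *	_THREAD_STACK_CACHE=abc		ignored (not a number)
 */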
/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __amd64id(void);
#endif

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD assembler strfoo() and memfoo() functions.
	 */
	__amd64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map.  We switch to the primary link map
	 * and stay there once we see it.  All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext_syscall(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));
#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop =
	    (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) _private_getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* __lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc.  We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) _private_memcpy(udp,
		    oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = _private_getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory.  When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata.  This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) _private_setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;
	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_max_spinners = (uint8_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_queue_spin = thread_queue_spin;

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime, so that
	 * references to it can be bound to the __threaded symbol defined
	 * outside libc, which may not have an initial value of 1
	 * (without a copy relocation in a.out).
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread().  We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}
/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * First allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = _private_mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(grab_assert_lock);
	(void) _atexit(collect_queue_statistics);
}

/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}
/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	mutex_t *mp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = __lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Some thread in the parent might have been suspended while
	 * holding udp->callout_lock.  Reinitialize the child's copy.
	 */
	_private_mutex_init(&udp->callout_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);

	/* no one in the child is on a sleep queue; reinitialize */
	if (udp->queue_head) {
		(void) _private_memset(udp->queue_head, 0,
		    2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; i++) {
			mp = &udp->queue_head[i].qh_lock;
			mp->mutex_flag = LOCK_INITED;
			mp->mutex_magic = MUTEX_MAGIC;
		}
	}

	/*
	 * All lwps except ourself are gone.  Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 */
	postfork1_child_tpool();
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
}

#pragma weak thr_setprio = _thr_setprio
#pragma weak pthread_setschedprio = _thr_setprio
#pragma weak _pthread_setschedprio = _thr_setprio
int
_thr_setprio(thread_t tid, int priority)
{
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = priority;
	return (_thread_setschedparam_main(tid, 0, &param, PRIO_SET_PRIO));
}

#pragma weak thr_getprio = _thr_getprio
int
_thr_getprio(thread_t tid, int *priority)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else {
		*priority = ulwp->ul_pri;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = _thr_self
#pragma weak thr_self = _thr_self
#pragma weak pthread_self = _thr_self
#pragma weak _pthread_self = _thr_self
thread_t
_thr_self()
{
	return (curthread->ul_lwpid);
}
#pragma weak thr_main = _thr_main
int
_thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}

int
_thrp_cancelled(void)
{
	return (curthread->ul_rval == PTHREAD_CANCELED);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak thr_stksegment = _thr_stksegment
int
_thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}

void
force_continue(ulwp_t *ulwp)
{
#if defined(THREAD_DEBUG)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
#endif
	int error;
	timespec_t ts;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));

	for (;;) {
		error = __lwp_continue(ulwp->ul_lwpid);
		if (error != 0 && error != EINTR)
			break;
		error = 0;
		if (ulwp->ul_stopping) {	/* he is stopping himself */
			ts.tv_sec = 0;		/* give him a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) __nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* he is running now */
			break;			/* so we are done */
		/*
		 * He is marked as being in the process of stopping
		 * himself.  Loop around and continue him again.
		 * He may not have been stopped the first time.
		 */
	}
}
1862 */ 1863 ulwp->ul_pleasestop |= whystopped; 1864 force_continue(ulwp); 1865 if (link_dropped != NULL) { 1866 *link_dropped = 1; 1867 lmutex_unlock(&udp->link_lock); 1868 /* be sure to drop link_lock only once */ 1869 link_dropped = NULL; 1870 } 1871 1872 /* 1873 * The thread may disappear by calling thr_exit() so we 1874 * cannot rely on the ulwp pointer after dropping the lock. 1875 * Instead, we search the hash table to find it again. 1876 * When we return, we may find that the thread has been 1877 * continued by some other thread. The suspend/continue 1878 * interfaces are prone to such race conditions by design. 1879 */ 1880 while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop && 1881 (ulwp->ul_pleasestop & whystopped)) { 1882 (void) _cond_wait(cvp, mp); 1883 for (ulwp = udp->thr_hash_table[ix].hash_bucket; 1884 ulwp != NULL; ulwp = ulwp->ul_hash) { 1885 if (ulwp->ul_lwpid == tid) 1886 break; 1887 } 1888 } 1889 1890 if (ulwp == NULL || ulwp->ul_dead) 1891 error = ESRCH; 1892 else { 1893 /* 1894 * Do another lwp_suspend() to make sure we don't 1895 * return until the target thread is fully stopped 1896 * in the kernel. Don't apply lwp_suspend() until 1897 * we know that the target is not holding any 1898 * queue locks, that is, that it has completed 1899 * ulwp_unlock(self) and has, or at least is 1900 * about to, call lwp_suspend() on itself. We do 1901 * this by grabbing the target's spin lock. 1902 */ 1903 ASSERT(ulwp->ul_lwpid == tid); 1904 spin_lock_set(&ulwp->ul_spinlock); 1905 (void) ___lwp_suspend(tid); 1906 spin_lock_clear(&ulwp->ul_spinlock); 1907 /* 1908 * If some other thread did a thr_continue() 1909 * on the target thread we have to start over. 1910 */ 1911 if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped)) 1912 goto top; 1913 } 1914 } 1915 1916 (void) cond_broadcast_internal(cvp); 1917 lmutex_unlock(mp); 1918 return (error); 1919 } 1920 1921 int 1922 _thrp_suspend(thread_t tid, uchar_t whystopped) 1923 { 1924 ulwp_t *self = curthread; 1925 uberdata_t *udp = self->ul_uberdata; 1926 ulwp_t *ulwp; 1927 int error = 0; 1928 1929 ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0); 1930 ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0); 1931 1932 /* 1933 * We can't suspend anyone except ourself while 1934 * some other thread is performing a fork. 1935 * This also allows only one suspension at a time. 1936 */ 1937 if (tid != self->ul_lwpid) 1938 fork_lock_enter(); 1939 1940 if ((ulwp = find_lwp(tid)) == NULL) 1941 error = ESRCH; 1942 else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) { 1943 ulwp_unlock(ulwp, udp); 1944 error = EINVAL; 1945 } else if (ulwp->ul_stop) { /* already stopped */ 1946 ulwp->ul_stop |= whystopped; 1947 ulwp_broadcast(ulwp); 1948 ulwp_unlock(ulwp, udp); 1949 } else if (ulwp != self) { 1950 /* 1951 * After suspending the other thread, move it out of a 1952 * critical section and deal with the schedctl mappings. 1953 * safe_suspend() suspends the other thread, calls 1954 * ulwp_broadcast(ulwp) and drops the ulwp lock. 1955 */ 1956 error = safe_suspend(ulwp, whystopped, NULL); 1957 } else { 1958 int schedctl_after_fork = 0; 1959 1960 /* 1961 * We are suspending ourself. We must not take a signal 1962 * until we return from lwp_suspend() and clear ul_stopping. 1963 * This is to guard against siglongjmp(). 
/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}
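/*
 * Illustrative sketch of how the library's fork implementation is
 * expected to drive the two routines above.  This is hedged: the actual
 * caller lives elsewhere in libc, and only the suspend_fork()/
 * continue_fork() calls below are taken from this file.
 *
 *	fork_lock_enter();
 *	suspend_fork();			// stop every other lwp (TSTP_FORK)
 *	pid = __forkall();		// or the fork1() equivalent
 *	if (pid == 0)
 *		continue_fork(1);	// child: also clear schedctl pointers
 *	else
 *		continue_fork(0);	// parent: just restart the lwps
 *	fork_lock_exit();
 */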
int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

#pragma weak thr_suspend = _thr_suspend
int
_thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

#pragma weak thr_continue = _thr_continue
int
_thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

#pragma weak thr_yield = _thr_yield
void
_thr_yield()
{
	lwp_yield();
}

#pragma weak thr_kill = _thr_kill
#pragma weak pthread_kill = _thr_kill
#pragma weak _pthread_kill = _thr_kill
int
_thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (__lwp_kill(tid, sig));
}
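/*
 * Illustrative sketch (not part of this file): typical application-level
 * use of the suspend/continue pair exported above via the weak symbols.
 * As the comments in safe_suspend() warn, these interfaces are racy by
 * design: some other thread may continue the target at any time.  The
 * variable name is hypothetical.
 *
 *	#include <thread.h>
 *
 *	thread_t victim;	// a previously created thread's id
 *
 *	if (thr_suspend(victim) == 0) {
 *		// victim is now stopped at a safe point
 *		// (inspect or checkpoint it here)
 *		(void) thr_continue(victim);
 *	}
 */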
2217 */ 2218 self->ul_cursig = 0; 2219 take_deferred_signal(sig); 2220 ASSERT(self->ul_cursig == 0); 2221 } 2222 } 2223 ASSERT(self->ul_critical == 0); 2224 } 2225 2226 int 2227 _ti_bind_guard(int bindflag) 2228 { 2229 ulwp_t *self = curthread; 2230 2231 if ((self->ul_bindflags & bindflag) == bindflag) 2232 return (0); 2233 enter_critical(self); 2234 self->ul_bindflags |= bindflag; 2235 return (1); 2236 } 2237 2238 int 2239 _ti_bind_clear(int bindflag) 2240 { 2241 ulwp_t *self = curthread; 2242 2243 if ((self->ul_bindflags & bindflag) == 0) 2244 return (self->ul_bindflags); 2245 self->ul_bindflags &= ~bindflag; 2246 exit_critical(self); 2247 return (self->ul_bindflags); 2248 } 2249 2250 /* 2251 * sigoff() and sigon() enable cond_wait() to behave (optionally) like 2252 * it does in the old libthread (see the comments in cond_wait_queue()). 2253 * Also, signals are deferred at thread startup until TLS constructors 2254 * have all been called, at which time _thr_setup() calls sigon(). 2255 * 2256 * _sigoff() and _sigon() are external consolidation-private interfaces to 2257 * sigoff() and sigon(), respectively, in libc. These are used in libnsl. 2258 * Also, _sigoff() and _sigon() are called from dbx's run-time checking 2259 * (librtc.so) to defer signals during its critical sections (not to be 2260 * confused with libc critical sections [see exit_critical() above]). 2261 */ 2262 void 2263 _sigoff(void) 2264 { 2265 sigoff(curthread); 2266 } 2267 2268 void 2269 _sigon(void) 2270 { 2271 sigon(curthread); 2272 } 2273 2274 void 2275 sigon(ulwp_t *self) 2276 { 2277 int sig; 2278 2279 ASSERT(self->ul_sigdefer > 0); 2280 if (--self->ul_sigdefer == 0) { 2281 if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) { 2282 self->ul_cursig = 0; 2283 take_deferred_signal(sig); 2284 ASSERT(self->ul_cursig == 0); 2285 } 2286 } 2287 } 2288 2289 #pragma weak thr_getconcurrency = _thr_getconcurrency 2290 int 2291 _thr_getconcurrency() 2292 { 2293 return (thr_concurrency); 2294 } 2295 2296 #pragma weak pthread_getconcurrency = _pthread_getconcurrency 2297 int 2298 _pthread_getconcurrency() 2299 { 2300 return (pthread_concurrency); 2301 } 2302 2303 #pragma weak thr_setconcurrency = _thr_setconcurrency 2304 int 2305 _thr_setconcurrency(int new_level) 2306 { 2307 uberdata_t *udp = curthread->ul_uberdata; 2308 2309 if (new_level < 0) 2310 return (EINVAL); 2311 if (new_level > 65536) /* 65536 is totally arbitrary */ 2312 return (EAGAIN); 2313 lmutex_lock(&udp->link_lock); 2314 if (new_level > thr_concurrency) 2315 thr_concurrency = new_level; 2316 lmutex_unlock(&udp->link_lock); 2317 return (0); 2318 } 2319 2320 #pragma weak pthread_setconcurrency = _pthread_setconcurrency 2321 int 2322 _pthread_setconcurrency(int new_level) 2323 { 2324 if (new_level < 0) 2325 return (EINVAL); 2326 if (new_level > 65536) /* 65536 is totally arbitrary */ 2327 return (EAGAIN); 2328 pthread_concurrency = new_level; 2329 return (0); 2330 } 2331 2332 #pragma weak thr_min_stack = _thr_min_stack 2333 #pragma weak __pthread_min_stack = _thr_min_stack 2334 size_t 2335 _thr_min_stack(void) 2336 { 2337 return (MINSTACK); 2338 } 2339 2340 int 2341 __nthreads(void) 2342 { 2343 return (curthread->ul_uberdata->nthreads); 2344 } 2345 2346 /* 2347 * XXX 2348 * The remainder of this file implements the private interfaces to java for 2349 * garbage collection. It is no longer used, at least by java 1.2. 2350 * It can all go away once all old JVMs have disappeared. 
#pragma weak thr_getconcurrency = _thr_getconcurrency
int
_thr_getconcurrency()
{
	return (thr_concurrency);
}

#pragma weak pthread_getconcurrency = _pthread_getconcurrency
int
_pthread_getconcurrency()
{
	return (pthread_concurrency);
}

#pragma weak thr_setconcurrency = _thr_setconcurrency
int
_thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

#pragma weak pthread_setconcurrency = _pthread_setconcurrency
int
_pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}
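/*
 * Illustrative sketch (not part of this file): as the code above shows,
 * these interfaces only validate, record, and return a hint; they do
 * not create any lwps.  A caller therefore sees its own value echoed
 * back.
 *
 *	#include <pthread.h>
 *	#include <assert.h>
 *
 *	(void) pthread_setconcurrency(8);
 *	assert(pthread_getconcurrency() == 8);	// the stored hint returns
 */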
#pragma weak thr_min_stack = _thr_min_stack
#pragma weak __pthread_min_stack = _thr_min_stack
size_t
_thr_min_stack(void)
{
	return (MINSTACK);
}

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}

/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int suspendingallmutators;	/* when non-zero, suspending all mutators. */
int suspendedallmutators;	/* when non-zero, all mutators suspended. */
int mutatorsbarrier;		/* when non-zero, mutators barrier imposed. */
mutex_t mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak thr_getstate = _thr_getstate
int
_thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak thr_setstate = _thr_setstate
int
_thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}
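/*
 * Illustrative sketch (hypothetical, not part of this file): how a
 * garbage collector was expected to sample a stopped mutator's state
 * with thr_getstate() above, bracketed by the allmutators interfaces
 * defined later in this file.  The variable names are hypothetical.
 *
 *	int flag;
 *	lwpid_t lwpid;
 *	stack_t ss;
 *	gregset_t regs;
 *
 *	(void) thr_suspend_allmutators();
 *	if (thr_getstate(tid, &flag, &lwpid, &ss, regs) == 0 &&
 *	    flag == TRS_NONVOLATILE) {
 *		// walk the stack in [ss.ss_sp - ss.ss_size, ss.ss_sp)
 *		// using the non-volatile registers returned in regs
 *	}
 *	(void) thr_continue_allmutators();
 */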
int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = _open(buf, O_RDONLY, 0)) >= 0) {
		while (_pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) _close(fd);
				return (0);
			}
			lwp_yield();	/* give him a chance to stop */
		}
		(void) _close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = _open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;	/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (_writev(fd, iov, 3) >= 0) {
			(void) _close(fd);
			return (0);
		}
		(void) _close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak thr_sighndlrinfo = _thr_sighndlrinfo
void
_thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
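/*
 * Illustrative sketch (hypothetical): a stack walker uses the
 * address/size pair from thr_sighndlrinfo() above to recognize the
 * libc signal trampoline and recover the ucontext from such a frame.
 * The 'pc' variable is assumed to come from the walker.
 *
 *	void (*hndlr)();
 *	int hndlrsize;
 *
 *	thr_sighndlrinfo(&hndlr, &hndlrsize);
 *	if ((uintptr_t)pc - (uintptr_t)hndlr < (uintptr_t)hndlrsize) {
 *		// pc lies within __sighndlr: this frame was built by
 *		// signal delivery, so a ucontext_t hangs off of it
 *	}
 */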
/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak thr_setmutator = _thr_setmutator
int
_thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;

	enabled = enabled? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			while (mutatorsbarrier)
				(void) _cond_wait(&mutatorscv, &mutatorslock);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak thr_mutators_barrier = _thr_mutators_barrier
void
_thr_mutators_barrier(int enabled)
{
	int oldvalue;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	while (mutatorsbarrier && enabled)
		(void) _cond_wait(&mutatorscv, &mutatorslock);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast_internal(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}

/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended.  Actively running non-mutators remain
 * running.  Any other thread is suspended.
 */
#pragma weak thr_suspend_allmutators = _thr_suspend_allmutators
int
_thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak thr_suspend_mutator = _thr_suspend_mutator
int
_thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak thr_continue_allmutators = _thr_continue_allmutators
int
_thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Resume a suspended mutator.
 */
#pragma weak thr_continue_mutator = _thr_continue_mutator
int
_thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak thr_wait_mutator = _thr_wait_mutator
int
_thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

top:
	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) _cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */