/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <pthread.h>
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>
#include "libc.h"

/*
 * These symbols should not be exported from libc, but
 * /lib/libm.so.2 references _thr_main.  libm needs to be fixed.
 * Also, some older versions of the Studio compiler/debugger
 * components reference them.  These need to be fixed, too.
 */
#pragma weak _thr_main = thr_main
#pragma weak _thr_create = thr_create
#pragma weak _thr_join = thr_join
#pragma weak _thr_self = thr_self

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc.  The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	(tid & (udp)->hash_mask)

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];
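
/*
 * Illustration (added commentary, not part of the original source):
 * TIDHASH() simply masks the thread id with hash_mask.  Before
 * finish_init() runs, hash_mask is 0 and every tid maps to the single
 * bucket in init_hash_table; afterwards hash_mask is HASHTBLSZ - 1
 * (1023), so for example:
 *
 *	TIDHASH(1, udp)    == 1
 *	TIDHASH(1023, udp) == 1023
 *	TIDHASH(1024, udp) == 0		(wraps around)
 *
 * Consecutive lwpids therefore spread evenly across the table.
 */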

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* ld_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int	thread_queue_fifo = 4;
int	thread_queue_dump = 0;
int	thread_cond_wait_defer = 0;
int	thread_error_detection = 0;
int	thread_async_safe = 0;
int	thread_stack_cache = 10;
int	thread_door_noreserve = 0;
int	thread_locks_misaligned = 0;

static	ulwp_t	*ulwp_alloc(void);
static	void	ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}
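
/*
 * Illustration (added commentary, not part of the original source):
 * hash_in_unlocked() pushes the ulwp onto the head of its bucket's
 * singly linked chain, so the most recently created lwp is found
 * first.  A locked insert followed by a locked delete looks like:
 *
 *	hash_in(ulwp, udp);	takes the bucket's hash_lock
 *	...
 *	hash_out(ulwp, udp);	unlinks and sets ul_ix = -1
 *
 * The *_unlocked variants exist for callers (such as _thrp_exit())
 * that already hold the bucket lock.
 */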

/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}

/*
 * Retain stack information for thread structures that are being recycled for
 * new threads.  All other members of the thread structure should be zeroed.
 */
static void
ulwp_clean(ulwp_t *ulwp)
{
	caddr_t stk = ulwp->ul_stk;
	size_t mapsiz = ulwp->ul_mapsiz;
	size_t guardsize = ulwp->ul_guardsize;
	uintptr_t stktop = ulwp->ul_stktop;
	size_t stksiz = ulwp->ul_stksiz;

	(void) memset(ulwp, 0, sizeof (*ulwp));

	ulwp->ul_stk = stk;
	ulwp->ul_mapsiz = mapsiz;
	ulwp->ul_guardsize = guardsize;
	ulwp->ul_stktop = stktop;
	ulwp->ul_stksiz = stksiz;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    _lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}

/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack.  Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			ulwp->ul_ix = -1;
			if (guardsize)	/* protect the extra red zone */
				(void) mprotect(stk, guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}
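
/*
 * Illustration (added commentary, not part of the original source):
 * a worked sizing example for find_stack(), assuming a 4K page size.
 *
 *	stksize = 70000   ->  (70000 + 4095) & -4096 == 73728 (18 pages)
 *	guardsize = 4096  ->  ((4096 + 4095) & -4096) - 4096 == 0
 *	mapsize = 73728 + 0 == 73728
 *
 * The requested one-page guard costs nothing extra because mmap()
 * already supplies at least one page of red zone, which is why one
 * page is deducted from the rounded-up guardsize above.
 */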

/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}

/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}
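
/*
 * Illustration (added commentary, not part of the original source):
 * because find_lwpp() returns with the bucket's hash lock held on
 * success, every caller must eventually drop that lock.  The typical
 * pattern, as in pthread_detach() below, is:
 *
 *	if ((ulwpp = find_lwpp(tid)) == NULL)
 *		return (ESRCH);
 *	ulwp = *ulwpp;
 *	...				operate on the thread
 *	ulwp_unlock(ulwp, udp);		drops the same hash lock
 */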

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}

int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread, size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if ((stk || stksize) && stksize < MINSTACK)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
		ulwp->ul_ix = -1;
	}
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;
	ulwp->ul_misaligned = self->ul_misaligned;

	/* new thread inherits creating thread's scheduling parameters */
	ulwp->ul_policy = self->ul_policy;
	ulwp->ul_pri = (self->ul_epri? self->ul_epri : self->ul_pri);
	ulwp->ul_cid = self->ul_cid;
	ulwp->ul_rtclassid = self->ul_rtclassid;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called.  _thrp_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	error = setup_context(&uc, _thrp_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize);
	if (error != 0 && stk != NULL)	/* inaccessible stack */
		error = EFAULT;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

int
thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread, 0));
}
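
/*
 * Illustration (added commentary, not part of the original source):
 * a minimal caller's view of the interface above.  The names worker
 * and arg are hypothetical.
 *
 *	thread_t tid;
 *	int err;
 *
 *	err = thr_create(NULL, 0, worker, arg,
 *	    THR_DAEMON, &tid);		creates a detached daemon lwp
 *
 * Note that THR_DAEMON implies THR_DETACHED in _thrp_create(), that
 * a NULL stack of size 0 gets a default-sized cached stack from
 * find_stack(), and that without THR_SUSPENDED the new lwp is set
 * running by the final _thrp_continue() call.
 */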

/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process.  We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
#if defined(THREAD_DEBUG)
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
#endif
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure.  Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

#if defined(THREAD_DEBUG)
void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}
#endif

static void __NORETURN
_thrp_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled.  Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	set_cancel_pending_flag(self, 1);
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask.  We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior.  Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag.  It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thrp_exit_common(): _thrp_unwind() returned");

	for (;;)	/* to shut the compiler up about __NORETURN */
		continue;
}
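
/*
 * Illustration (added commentary, not part of the original source):
 * the unwind argument distinguishes the two entry points below.
 *
 *	_thrp_terminate(status)	->  _thrp_exit_common(status, 0)
 *		the start function returned; we are already at the
 *		top of the stack, so no unwinding is needed.
 *	thr_exit(status)	->  _thrp_exit_common(status, 1)
 *		sets ul_unwind so that _t_cancel() will run
 *		_ex_unwind(), giving C++ destructors and
 *		pthread_cleanup_push() handlers a chance to run.
 */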

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thrp_terminate(void *status)
{
	_thrp_exit_common(status, 0);
}

#pragma weak pthread_exit = thr_exit
#pragma weak _thr_exit = thr_exit
void
thr_exit(void *status)
{
	_thrp_exit_common(status, 1);
}

int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about.  It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

int
thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}
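
/*
 * Illustration (added commentary, not part of the original source):
 * thr_join() accepts a tid of 0, in which case the underlying
 * lwp_wait() waits for any undetached lwp and reports which one
 * departed; pthread_join() (below) rejects a zero tid and has no
 * "departed" argument:
 *
 *	thread_t who;
 *	void *rval;
 *
 *	(void) thr_join(0, &who, &rval);	joins any thread
 *	(void) pthread_join(tid, &rval);	joins exactly tid
 */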

/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak _pthread_join = pthread_join
int
pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}

int
pthread_detach(pthread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
		thread_adaptive_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
#endif
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
	if ((value = envvar(ev, "LOCKS_MISALIGNED", 1)) >= 0)
		thread_locks_misaligned = value;
}
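
/*
 * Illustration (added commentary, not part of the original source):
 * with the environment set as, for example,
 *
 *	_THREAD_STACK_CACHE=100
 *
 * set_thread_vars() (below) strips the "_THREAD_" prefix and calls
 * etest("STACK_CACHE=100"); envvar() then matches the name, parses
 * the decimal value, clamps it to the given limit (10000 here), and
 * thread_stack_cache becomes 100.  A malformed value such as
 * "STACK_CACHE=1x0" yields -1 and leaves the default in place.
 */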

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && strncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && strncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}

/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __proc64id(void);
#endif

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD and Intel assembler strfoo() and memfoo() functions.
	 */
	__proc64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map.  We switch to the primary link map
	 * and stay there once we see it.  All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* _lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_policy = -1;	/* initialize only when needed */
	self->ul_pri = 0;
	self->ul_cid = 0;
	self->ul_rtclassid = -1;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc.  We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) memcpy(udp, oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory.  When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata.  This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_misaligned = (char)thread_locks_misaligned;
	self->ul_max_spinners = (uint8_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_queue_spin = thread_queue_spin;

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/*
	 * Make private copies of __xpg4 and __xpg6 so libc can test
	 * them after this point without invoking the dynamic linker.
	 */
	libc__xpg4 = __xpg4;
	libc__xpg6 = __xpg6;

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime, so that
	 * __threaded can be bound to __threaded outside libc which may not
	 * have initial value of 1 (without a copy relocation in a.out).
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread().  We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
	 */
	update_sched(self);

	/*
	 * Allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(grab_assert_lock);
#if defined(THREAD_DEBUG)
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(collect_queue_statistics);
#endif
}

/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}
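
/*
 * Illustration (added commentary, not part of the original source):
 * setting ul_lwpid to (lwpid_t)(-1) is what makes the short-circuit
 * in dead_and_buried() fire.  After fork1(), every ulwp marked here
 * satisfies dead_and_buried(ulwp) == 1 without a _lwp_kill() probe,
 * so postfork1_child() can recycle or discard them immediately.
 */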

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = _lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Some thread in the parent might have been suspended
	 * while holding udp->callout_lock or udp->ld_lock.
	 * Reinitialize the child's copies.
	 */
	(void) mutex_init(&udp->callout_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	(void) mutex_init(&udp->ld_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);

	/* no one in the child is on a sleep queue; reinitialize */
	if ((qp = udp->queue_head) != NULL) {
		(void) memset(qp, 0, 2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
			qp->qh_type = (i < QHASHSIZE)? MX : CV;
			qp->qh_lock.mutex_flag = LOCK_INITED;
			qp->qh_lock.mutex_magic = MUTEX_MAGIC;
			qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
			qp->qh_hlen = 1;
			qp->qh_hmax = 1;
#endif
		}
	}

	/*
	 * All lwps except ourself are gone.  Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 */
	postfork1_child_tpool();
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
}

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = thr_self
#pragma weak pthread_self = thr_self
thread_t
thr_self()
{
	return (curthread->ul_lwpid);
}

int
thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}

int
_thrp_cancelled(void)
{
	return (curthread->ul_rval == PTHREAD_CANCELED);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak _thr_stksegment = thr_stksegment
int
thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}

void
force_continue(ulwp_t *ulwp)
{
#if defined(THREAD_DEBUG)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
#endif
	int error;
	timespec_t ts;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));

	for (;;) {
		error = _lwp_continue(ulwp->ul_lwpid);
		if (error != 0 && error != EINTR)
			break;
		error = 0;
		if (ulwp->ul_stopping) {	/* he is stopping himself */
			ts.tv_sec = 0;		/* give him a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) __nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* he is running now */
			break;			/* so we are done */
		/*
		 * He is marked as being in the process of stopping
		 * himself.  Loop around and continue him again.
		 * He may not have been stopped the first time.
		 */
	}
}

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe
 * point, that is, to a point where ul_critical is zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
		 */
		ulwp->ul_pleasestop |= whystopped;
		force_continue(ulwp);
		if (link_dropped != NULL) {
			*link_dropped = 1;
			lmutex_unlock(&udp->link_lock);
			/* be sure to drop link_lock only once */
			link_dropped = NULL;
		}

		/*
		 * The thread may disappear by calling thr_exit() so we
		 * cannot rely on the ulwp pointer after dropping the lock.
		 * Instead, we search the hash table to find it again.
		 * When we return, we may find that the thread has been
		 * continued by some other thread.  The suspend/continue
		 * interfaces are prone to such race conditions by design.
		 */
		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
		    (ulwp->ul_pleasestop & whystopped)) {
			(void) __cond_wait(cvp, mp);
			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
			    ulwp != NULL; ulwp = ulwp->ul_hash) {
				if (ulwp->ul_lwpid == tid)
					break;
			}
		}

		if (ulwp == NULL || ulwp->ul_dead)
			error = ESRCH;
		else {
			/*
			 * Do another lwp_suspend() to make sure we don't
			 * return until the target thread is fully stopped
			 * in the kernel.  Don't apply lwp_suspend() until
			 * we know that the target is not holding any
			 * queue locks, that is, that it has completed
			 * ulwp_unlock(self) and has, or at least is
			 * about to, call lwp_suspend() on itself.  We do
			 * this by grabbing the target's spin lock.
			 */
			ASSERT(ulwp->ul_lwpid == tid);
			spin_lock_set(&ulwp->ul_spinlock);
			(void) ___lwp_suspend(tid);
			spin_lock_clear(&ulwp->ul_spinlock);
			/*
			 * If some other thread did a thr_continue()
			 * on the target thread we have to start over.
			 */
			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
				goto top;
		}
	}

	(void) cond_broadcast(cvp);
	lmutex_unlock(mp);
	return (error);
}
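
/*
 * Illustration (added commentary, not part of the original source):
 * at the public API level, the machinery above is reached as:
 *
 *	thr_suspend(tid)   ->  _thrp_suspend(tid, TSTP_REGULAR)
 *	thr_continue(tid)  ->  _thrp_continue(tid, TSTP_REGULAR)
 *
 * For a target other than the caller, _thrp_suspend() takes
 * fork_lock and calls safe_suspend(), so the target is parked only
 * at a point where ul_critical is zero and no queue locks are held.
 * As the comments above note, suspend/continue are racy by design:
 * an unrelated thr_continue() can restart the target at any time.
 */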
int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while
	 * some other thread is performing a fork.
	 * This also allows only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
		 */
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it, in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Suspend the lwp and move it out of
			 * any critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}
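/*
 * Added commentary (an assumption about the callers, stated here for
 * orientation): suspend_fork() and continue_fork() are intended to
 * bracket libc's fork processing while fork_lock is held, with
 * continue_fork(0) run in the parent and continue_fork(1) in the
 * child of forkall(), so that schedctl pointers inherited from the
 * parent's lwps are cleared before those lwps are set running again.
 */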
void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self;
		    ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}

int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

int
thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

int
thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

void
thr_yield()
{
	yield();
}

#pragma weak pthread_kill = thr_kill
#pragma weak _thr_kill = thr_kill
int
thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (_lwp_kill(tid, sig));
}
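/*
 * Illustrative sketch (not part of libc): the public suspend/continue
 * interfaces pair up as shown below.  As the comments in safe_suspend()
 * note, these interfaces are racy by design: a thread continued by
 * thr_continue() may be suspended again by another thread at any time.
 * Names are hypothetical.
 *
 *	#include <thread.h>
 *
 *	thread_t tid;
 *	(void) thr_create(NULL, 0, worker, NULL, 0, &tid);
 *	...
 *	if (thr_suspend(tid) == 0) {
 *		... examine the stopped thread here ...
 *		(void) thr_continue(tid);
 *	}
 */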
/*
 * Exit a critical section, take deferred actions if necessary.
 */
void
do_exit_critical()
{
	ulwp_t *self = curthread;
	int sig;

	ASSERT(self->ul_critical == 0);
	if (self->ul_dead)
		return;

	while (self->ul_pleasestop ||
	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
		/*
		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
		 * by keeping self->ul_critical == 1 here.
		 */
		self->ul_critical++;
		while (self->ul_pleasestop) {
			/*
			 * Guard against suspending ourself while on a sleep
			 * queue.  See the comments in call_user_handler().
			 */
			unsleep_self();
			set_parking_flag(self, 0);
			(void) _thrp_suspend(self->ul_lwpid,
			    self->ul_pleasestop);
		}
		self->ul_critical--;

		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
			/*
			 * Clear ul_cursig before proceeding.
			 * This protects us from the dynamic linker's
			 * calls to bind_guard()/bind_clear() in the
			 * event that it is invoked to resolve a symbol
			 * like take_deferred_signal() below.
			 */
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
	ASSERT(self->ul_critical == 0);
}

/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
 * (ld.so.1) when it has to do something, like resolve a symbol to be called
 * by the application or one of its libraries.  _ti_bind_guard() is called
 * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
 * application.  The dynamic linker gets special dispensation from libc to
 * run in a critical region (all signals deferred and no thread suspension
 * or forking allowed), and to be immune from cancellation for the duration.
 */
int
_ti_bind_guard(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		ASSERT(self->ul_critical == 0);
		sigoff(self);	/* see no signals while holding ld_lock */
		(void) mutex_lock(&udp->ld_lock);
	}
	enter_critical(self);
	self->ul_save_state = self->ul_cancel_disabled;
	self->ul_cancel_disabled = 1;
	set_cancel_pending_flag(self, 0);
	self->ul_bindflags |= bindflag;
	return (1);
}

int
_ti_bind_clear(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	self->ul_cancel_disabled = self->ul_save_state;
	set_cancel_pending_flag(self, 0);
	exit_critical(self);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		ASSERT(self->ul_critical == 0);
		if (MUTEX_OWNED(&udp->ld_lock, self)) {
			(void) mutex_unlock(&udp->ld_lock);
			sigon(self);	/* reenable signals */
		}
	}
	return (self->ul_bindflags);
}
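/*
 * Illustrative sketch (not part of libc or ld.so.1): the expected
 * pairing, from the dynamic linker's side, is guard/work/clear, with
 * _ti_bind_guard()'s return value telling the caller whether it (rather
 * than some outer frame) established the guard and so must clear it:
 *
 *	if (_ti_bind_guard(flags)) {
 *		... resolve the symbol ...
 *		(void) _ti_bind_clear(flags);
 *	}
 */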
/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thrp_setup() calls sigon().
 *
 * _sigoff() and _sigon() are external consolidation-private interfaces to
 * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	sigoff(curthread);
}

void
_sigon(void)
{
	sigon(curthread);
}

void
sigon(ulwp_t *self)
{
	int sig;

	ASSERT(self->ul_sigdefer > 0);
	if (--self->ul_sigdefer == 0) {
		if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) {
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
}
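/*
 * Illustrative sketch (not part of libc): consumers such as libnsl
 * bracket a signal-sensitive region with the consolidation-private
 * pair.  Deferral evidently nests: ul_sigdefer counts the depth, and
 * a deferred signal is taken only when the outermost _sigon() drops
 * the count back to zero.
 *
 *	_sigoff();
 *	... region during which signals are held pending ...
 *	_sigon();
 */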
int
thr_getconcurrency()
{
	return (thr_concurrency);
}

int
pthread_getconcurrency()
{
	return (pthread_concurrency);
}

int
thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}

size_t
thr_min_stack(void)
{
	return (MINSTACK);
}

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}
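/*
 * Illustrative sketch (not part of libc): under the 1:1 thread model
 * there is no LWP pool to size, so concurrency levels are advisory
 * only; thr_setconcurrency() records a high-water mark that
 * thr_getconcurrency() simply reports back.  Names are hypothetical.
 *
 *	#include <thread.h>
 *
 *	(void) thr_setconcurrency(8);
 *	level = thr_getconcurrency();	now reports at least 8
 *
 *	stksize = thr_min_stack() + 32 * 1024;	a cushion over the minimum
 *	(void) thr_create(NULL, stksize, worker, NULL, 0, &tid);
 */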
/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int suspendingallmutators;	/* when non-zero, suspending all mutators. */
int suspendedallmutators;	/* when non-zero, all mutators suspended. */
int mutatorsbarrier;		/* when non-zero, mutators barrier imposed. */
mutex_t mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak _thr_getstate = thr_getstate
int
thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}
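/*
 * Illustrative sketch (not part of libc): a stack walker would stop
 * the target first, then ask for its non-volatile register state and
 * stack segment.  Names are hypothetical.
 *
 *	gregset_t regs;
 *	stack_t ss;
 *	int flag;
 *
 *	if (thr_suspend(tid) == 0 &&
 *	    thr_getstate(tid, &flag, NULL, &ss, regs) == 0 &&
 *	    flag == TRS_NONVOLATILE) {
 *		... walk [(char *)ss.ss_sp - ss.ss_size, ss.ss_sp) ...
 *	}
 *	(void) thr_continue(tid);
 */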
/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak _thr_setstate = thr_setstate
int
thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = __open(buf, O_RDONLY, 0)) >= 0) {
		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) __close(fd);
				return (0);
			}
			yield();	/* give it a chance to stop */
		}
		(void) __close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (__writev(fd, iov, 3) >= 0) {
			(void) __close(fd);
			return (0);
		}
		(void) __close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
void
thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}

/*
 * Mark a thread as a mutator, or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak _thr_setmutator = thr_setmutator
int
thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;
	int cancel_state;

	enabled = enabled? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
			    &cancel_state);
			while (mutatorsbarrier)
				(void) cond_wait(&mutatorscv, &mutatorslock);
			(void) pthread_setcancelstate(cancel_state, NULL);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak _thr_mutators_barrier = thr_mutators_barrier
void
thr_mutators_barrier(int enabled)
{
	int oldvalue;
	int cancel_state;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (mutatorsbarrier && enabled)
		(void) cond_wait(&mutatorscv, &mutatorslock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}

/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended; non-mutators are left running.
 */
#pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
int
thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Suspend the lwp and move it out of
			 * any critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak _thr_suspend_mutator = thr_suspend_mutator
int
thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak _thr_continue_allmutators = thr_continue_allmutators
int
thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Resume a suspended mutator.
 */
#pragma weak _thr_continue_mutator = thr_continue_mutator
int
thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak _thr_wait_mutator = thr_wait_mutator
int
thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int cancel_state;
	int error = 0;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
top:
	if ((ulwp = find_lwp(tid)) == NULL) {
		(void) pthread_setcancelstate(cancel_state, NULL);
		return (ESRCH);
	}

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	(void) pthread_setcancelstate(cancel_state, NULL);
	return (error);
}
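/*
 * Illustrative sketch (not part of libc): a garbage collector using
 * these private interfaces would mark its managed threads as mutators
 * and then stop the world roughly as follows.  Names are hypothetical.
 *
 *	(void) thr_setmutator(tid, 1);		in each managed thread
 *	...
 *	thr_mutators_barrier(1);		block new mutators
 *	(void) thr_suspend_allmutators();	stop the world
 *	... scan stacks, e.g. via thr_getstate() and __gettsp() ...
 *	(void) thr_continue_allmutators();	restart the world
 *	thr_mutators_barrier(0);		lift the barrier
 */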
/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */