/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <pthread.h>
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>
#include "libc.h"

/*
 * These symbols should not be exported from libc, but
 * /lib/libm.so.2 references _thr_main.  libm needs to be fixed.
 * Also, some older versions of the Studio compiler/debugger
 * components reference them.  These need to be fixed, too.
 */
#pragma weak _thr_main = thr_main
#pragma weak _thr_create = thr_create
#pragma weak _thr_join = thr_join
#pragma weak _thr_self = thr_self

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread".  The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc.  The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;

#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	((tid) & (udp)->hash_mask)
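/*
 * Illustration: before finish_init() allocates the full table,
 * hash_size is 1 and hash_mask is 0, so every tid maps to bucket 0
 * of init_hash_table below.  After finish_init(), hash_mask is
 * HASHTBLSZ - 1 (1023), so TIDHASH(1027, udp) == 3 and tids 3 and
 * 1027 share a bucket, chained through their ul_hash links.
 */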
/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* ld_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int	thread_queue_fifo = 4;
int	thread_queue_dump = 0;
int	thread_cond_wait_defer = 0;
int	thread_error_detection = 0;
int	thread_async_safe = 0;
int	thread_stack_cache = 10;
int	thread_door_noreserve = 0;
int	thread_locks_misaligned = 0;

static	ulwp_t	*ulwp_alloc(void);
static	void	ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}

/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}
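/*
 * Note on the loop in hash_out_unlocked() above: ulwpp walks the
 * *links* of the singly-linked bucket chain rather than the elements,
 * so the element is unlinked with a single store (*ulwpp = ulwp->ul_hash)
 * that works the same whether ulwp is at the head of the bucket or in
 * the interior; no special case for the head is needed.
 */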
/*
 * Retain stack information for thread structures that are being recycled for
 * new threads.  All other members of the thread structure should be zeroed.
 */
static void
ulwp_clean(ulwp_t *ulwp)
{
	caddr_t stk = ulwp->ul_stk;
	size_t mapsiz = ulwp->ul_mapsiz;
	size_t guardsize = ulwp->ul_guardsize;
	uintptr_t stktop = ulwp->ul_stktop;
	size_t stksiz = ulwp->ul_stksiz;

	(void) memset(ulwp, 0, sizeof (*ulwp));

	ulwp->ul_stk = stk;
	ulwp->ul_mapsiz = mapsiz;
	ulwp->ul_guardsize = guardsize;
	ulwp->ul_stktop = stktop;
	ulwp->ul_stksiz = stksiz;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    _lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}
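/*
 * Note: _lwp_kill() with signal number 0 delivers no signal; it only
 * performs the validity check, so an ESRCH result above means the
 * lwp has completed its lwp_exit() and its id is no longer known
 * to the kernel.
 */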
/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack.  Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			ulwp->ul_ix = -1;
			if (guardsize)	/* protect the extra red zone */
				(void) mprotect(stk, guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}
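/*
 * Worked example of the size arithmetic in find_stack(), assuming an
 * 8K page size: a request for stksize 10000 rounds up to 16384; a
 * one-page guardsize first rounds to 8192, then has one page deducted
 * for the red zone that mmap() already provides, leaving 0; mapsize
 * is therefore 16384.  A zero stksize becomes DEFAULTSTACK minus
 * two pages.
 */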
/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}
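/*
 * Layout of the block allocated by ulwp_alloc() above (illustration):
 *
 *	data                      data + tls_size
 *	|<------ tls_size ------>|<-- sizeof (ulwp_t) -->|
 *	[ static TLS image       ][ ulwp_t               ]
 *
 * The returned ulwp_t pointer therefore has the thread's static
 * TLS image immediately below it, at negative offsets from the
 * pointer.
 */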
/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}

int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread, size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error = 0;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if ((stk || stksize) && stksize < MINSTACK)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
		ulwp->ul_ix = -1;
	}
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;
	ulwp->ul_misaligned = self->ul_misaligned;

	/* new thread inherits creating thread's scheduling parameters */
	ulwp->ul_policy = self->ul_policy;
	ulwp->ul_pri = (self->ul_epri? self->ul_epri : self->ul_pri);
	ulwp->ul_cid = self->ul_cid;
	ulwp->ul_rtclassid = self->ul_rtclassid;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called.  _thrp_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	if (setup_context(&uc, _thrp_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize) != 0)
		error = EAGAIN;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

int
thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
	long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread, 0));
}
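/*
 * Illustrative use of thr_create() (caller-side sketch; start_func
 * and arg are the caller's, not part of this file): creating a
 * detached daemon thread with default stack and guard sizes:
 *
 *	thread_t tid;
 *	int err = thr_create(NULL, 0, start_func, arg,
 *	    THR_DETACHED | THR_DAEMON, &tid);
 *
 * As _thrp_create() shows above, THR_DAEMON implies THR_DETACHED,
 * and a NULL stk with stksize 0 takes the find_stack() default.
 */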
/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process.  We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
#if defined(THREAD_DEBUG)
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
#endif
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure.  Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

#if defined(THREAD_DEBUG)
void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}
#endif

static void __NORETURN
_thrp_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled.  Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	set_cancel_pending_flag(self, 1);
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask.  We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior.  Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag.  It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thrp_exit_common(): _thrp_unwind() returned");

	for (;;)	/* to shut the compiler up about __NORETURN */
		continue;
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thrp_terminate(void *status)
{
	_thrp_exit_common(status, 0);
}

#pragma weak pthread_exit = thr_exit
#pragma weak _thr_exit = thr_exit
void
thr_exit(void *status)
{
	_thrp_exit_common(status, 1);
}

int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about.  It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

int
thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}
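/*
 * Example of the error mapping above: if lwp_wait() fails with
 * EINVAL because tid is not joinable, thr_join() reports ESRCH,
 * whereas pthread_join() below passes EINVAL through, as POSIX
 * requires for a detached thread.
 */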
/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak _pthread_join = pthread_join
int
pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}

int
pthread_detach(pthread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
		thread_adaptive_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
#endif
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
	if ((value = envvar(ev, "LOCKS_MISALIGNED", 1)) >= 0)
		thread_locks_misaligned = value;
}
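/*
 * Example: with _THREAD_STACK_CACHE=40 in the environment,
 * set_thread_vars() below calls etest("STACK_CACHE=40");
 * ematch() matches the name and returns a pointer to "40",
 * envvar() parses the value (clamping it at the limit, 10000),
 * and thread_stack_cache becomes 40.
 */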
/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && strncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && strncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}

/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __proc64id(void);
#endif

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD and Intel assembler strfoo() and memfoo() functions.
	 */
	__proc64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map.  We switch to the primary link map
	 * and stay there once we see it.  All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider.  When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* _lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_policy = -1;	/* initialize only when needed */
	self->ul_pri = 0;
	self->ul_cid = 0;
	self->ul_rtclassid = -1;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc.  We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) memcpy(udp, oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory.  When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata.  This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here.  Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_misaligned = (char)thread_locks_misaligned;
	self->ul_max_spinners = (uint8_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_queue_spin = thread_queue_spin;

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/*
	 * Make private copies of __xpg4 and __xpg6 so libc can test
	 * them after this point without invoking the dynamic linker.
	 */
	libc__xpg4 = __xpg4;
	libc__xpg6 = __xpg6;

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime, so that
	 * __threaded can be bound to a copy of __threaded outside libc,
	 * which may not have an initial value of 1 (in the absence of
	 * a copy relocation in a.out).
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread().  We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
	 */
	update_sched(self);

	/*
	 * Allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for thread cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(grab_assert_lock);
#if defined(THREAD_DEBUG)
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(collect_queue_statistics);
#endif
}

/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = _lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Some thread in the parent might have been suspended
	 * while holding udp->callout_lock or udp->ld_lock.
	 * Reinitialize the child's copies.
	 */
	(void) mutex_init(&udp->callout_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	(void) mutex_init(&udp->ld_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);

	/* no one in the child is on a sleep queue; reinitialize */
	if ((qp = udp->queue_head) != NULL) {
		(void) memset(qp, 0, 2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
			qp->qh_type = (i < QHASHSIZE)? MX : CV;
			qp->qh_lock.mutex_flag = LOCK_INITED;
			qp->qh_lock.mutex_magic = MUTEX_MAGIC;
			qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
			qp->qh_hlen = 1;
			qp->qh_hmax = 1;
#endif
		}
	}

	/*
	 * All lwps except ourself are gone.  Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 */
	postfork1_child_tpool();
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
}

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = thr_self
#pragma weak pthread_self = thr_self
thread_t
thr_self()
{
	return (curthread->ul_lwpid);
}
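/*
 * thr_main() returns 1 if the caller is the main thread, 0 if not,
 * and -1 if libc initialization is not yet complete (that is, if
 * __curthread() still returns NULL).
 */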
int
thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}

int
_thrp_cancelled(void)
{
	return (curthread->ul_rval == PTHREAD_CANCELED);
}

int
_thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
{
	stk->ss_sp = (void *)ulwp->ul_stktop;
	stk->ss_size = ulwp->ul_stksiz;
	stk->ss_flags = 0;
	return (0);
}

#pragma weak _thr_stksegment = thr_stksegment
int
thr_stksegment(stack_t *stk)
{
	return (_thrp_stksegment(curthread, stk));
}

void
force_continue(ulwp_t *ulwp)
{
#if defined(THREAD_DEBUG)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
#endif
	int error;
	timespec_t ts;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));

	for (;;) {
		error = _lwp_continue(ulwp->ul_lwpid);
		if (error != 0 && error != EINTR)
			break;
		error = 0;
		if (ulwp->ul_stopping) {	/* he is stopping himself */
			ts.tv_sec = 0;		/* give him a chance to run */
			ts.tv_nsec = 100000;	/* 100 usecs or clock tick */
			(void) __nanosleep(&ts, NULL);
		}
		if (!ulwp->ul_stopping)		/* he is running now */
			break;			/* so we are done */
		/*
		 * He is marked as being in the process of stopping
		 * himself.  Loop around and continue him again.
		 * He may not have been stopped the first time.
		 */
	}
}

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe
 * point, that is, to a point where ul_critical is zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if (ulwp->ul_critical == 0 || ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
1816 */ 1817 ulwp->ul_pleasestop |= whystopped; 1818 force_continue(ulwp); 1819 if (link_dropped != NULL) { 1820 *link_dropped = 1; 1821 lmutex_unlock(&udp->link_lock); 1822 /* be sure to drop link_lock only once */ 1823 link_dropped = NULL; 1824 } 1825 1826 /* 1827 * The thread may disappear by calling thr_exit() so we 1828 * cannot rely on the ulwp pointer after dropping the lock. 1829 * Instead, we search the hash table to find it again. 1830 * When we return, we may find that the thread has been 1831 * continued by some other thread. The suspend/continue 1832 * interfaces are prone to such race conditions by design. 1833 */ 1834 while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop && 1835 (ulwp->ul_pleasestop & whystopped)) { 1836 (void) __cond_wait(cvp, mp); 1837 for (ulwp = udp->thr_hash_table[ix].hash_bucket; 1838 ulwp != NULL; ulwp = ulwp->ul_hash) { 1839 if (ulwp->ul_lwpid == tid) 1840 break; 1841 } 1842 } 1843 1844 if (ulwp == NULL || ulwp->ul_dead) 1845 error = ESRCH; 1846 else { 1847 /* 1848 * Do another lwp_suspend() to make sure we don't 1849 * return until the target thread is fully stopped 1850 * in the kernel. Don't apply lwp_suspend() until 1851 * we know that the target is not holding any 1852 * queue locks, that is, that it has completed 1853 * ulwp_unlock(self) and has, or at least is 1854 * about to, call lwp_suspend() on itself. We do 1855 * this by grabbing the target's spin lock. 1856 */ 1857 ASSERT(ulwp->ul_lwpid == tid); 1858 spin_lock_set(&ulwp->ul_spinlock); 1859 (void) ___lwp_suspend(tid); 1860 spin_lock_clear(&ulwp->ul_spinlock); 1861 /* 1862 * If some other thread did a thr_continue() 1863 * on the target thread we have to start over. 1864 */ 1865 if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped)) 1866 goto top; 1867 } 1868 } 1869 1870 (void) cond_broadcast(cvp); 1871 lmutex_unlock(mp); 1872 return (error); 1873 } 1874 1875 int 1876 _thrp_suspend(thread_t tid, uchar_t whystopped) 1877 { 1878 ulwp_t *self = curthread; 1879 uberdata_t *udp = self->ul_uberdata; 1880 ulwp_t *ulwp; 1881 int error = 0; 1882 1883 ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0); 1884 ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0); 1885 1886 /* 1887 * We can't suspend anyone except ourself while 1888 * some other thread is performing a fork. 1889 * This also allows only one suspension at a time. 1890 */ 1891 if (tid != self->ul_lwpid) 1892 fork_lock_enter(); 1893 1894 if ((ulwp = find_lwp(tid)) == NULL) 1895 error = ESRCH; 1896 else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) { 1897 ulwp_unlock(ulwp, udp); 1898 error = EINVAL; 1899 } else if (ulwp->ul_stop) { /* already stopped */ 1900 ulwp->ul_stop |= whystopped; 1901 ulwp_broadcast(ulwp); 1902 ulwp_unlock(ulwp, udp); 1903 } else if (ulwp != self) { 1904 /* 1905 * After suspending the other thread, move it out of a 1906 * critical section and deal with the schedctl mappings. 1907 * safe_suspend() suspends the other thread, calls 1908 * ulwp_broadcast(ulwp) and drops the ulwp lock. 1909 */ 1910 error = safe_suspend(ulwp, whystopped, NULL); 1911 } else { 1912 int schedctl_after_fork = 0; 1913 1914 /* 1915 * We are suspending ourself. We must not take a signal 1916 * until we return from lwp_suspend() and clear ul_stopping. 1917 * This is to guard against siglongjmp(). 
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}
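
/*
 * Illustrative sketch (not part of the library): suspend_fork() and
 * continue_fork() are meant to be paired around the actual fork
 * operation, with fork_lock held throughout.  This is a simplification
 * for illustration only; real_forkall_syscall() is a hypothetical
 * placeholder, not an actual interface, and the real fork code
 * elsewhere in libc is considerably more involved.
 */
#if 0
static pid_t
forkall_pairing_sketch(void)
{
	pid_t pid;

	fork_lock_enter();		/* single-thread fork activity */
	suspend_fork();			/* stop all other lwps (TSTP_FORK) */
	pid = real_forkall_syscall();	/* hypothetical placeholder */
	/* the child passes 1 so the schedctl pointers are cleared first */
	continue_fork(pid == 0);
	fork_lock_exit();
	return (pid);
}
#endif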
int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

int
thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

int
thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

void
thr_yield()
{
	yield();
}

#pragma weak pthread_kill = thr_kill
#pragma weak _thr_kill = thr_kill
int
thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (_lwp_kill(tid, sig));
}
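
/*
 * Illustrative sketch (not part of the library): application-level use
 * of thr_suspend() and thr_continue().  As the comments in safe_suspend()
 * note, the suspend/continue interfaces are prone to races by design:
 * another thread may continue the target between the two calls, so
 * callers must own the target's lifecycle.  Assumes an application
 * compiled against <thread.h>; the function name is hypothetical.
 */
#if 0
#include <thread.h>

void
freeze_briefly(thread_t tid)
{
	if (thr_suspend(tid) == 0) {	/* ESRCH if tid no longer exists */
		/* ... inspect the stopped thread here ... */
		(void) thr_continue(tid);
	}
}
#endif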
/*
 * Exit a critical section, take deferred actions if necessary.
 */
void
do_exit_critical()
{
	ulwp_t *self = curthread;
	int sig;

	ASSERT(self->ul_critical == 0);
	if (self->ul_dead)
		return;

	while (self->ul_pleasestop ||
	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
		/*
		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
		 * by keeping self->ul_critical == 1 here.
		 */
		self->ul_critical++;
		while (self->ul_pleasestop) {
			/*
			 * Guard against suspending ourself while on a sleep
			 * queue.  See the comments in call_user_handler().
			 */
			unsleep_self();
			set_parking_flag(self, 0);
			(void) _thrp_suspend(self->ul_lwpid,
			    self->ul_pleasestop);
		}
		self->ul_critical--;

		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
			/*
			 * Clear ul_cursig before proceeding.
			 * This protects us from the dynamic linker's
			 * calls to bind_guard()/bind_clear() in the
			 * event that it is invoked to resolve a symbol
			 * like take_deferred_signal() below.
			 */
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
	ASSERT(self->ul_critical == 0);
}

/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
 * (ld.so.1) when it has to do something, like resolve a symbol to be called
 * by the application or one of its libraries.  _ti_bind_guard() is called
 * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
 * application.  The dynamic linker gets special dispensation from libc to
 * run in a critical region (all signals deferred and no thread suspension
 * or forking allowed), and to be immune from cancellation for the duration.
 */
int
_ti_bind_guard(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		ASSERT(self->ul_critical == 0);
		sigoff(self);	/* see no signals while holding ld_lock */
		(void) mutex_lock(&udp->ld_lock);
	}
	enter_critical(self);
	self->ul_save_state = self->ul_cancel_disabled;
	self->ul_cancel_disabled = 1;
	set_cancel_pending_flag(self, 0);
	self->ul_bindflags |= bindflag;
	return (1);
}

int
_ti_bind_clear(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	self->ul_cancel_disabled = self->ul_save_state;
	set_cancel_pending_flag(self, 0);
	exit_critical(self);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		ASSERT(self->ul_critical == 0);
		if (MUTEX_OWNED(&udp->ld_lock, self)) {
			(void) mutex_unlock(&udp->ld_lock);
			sigon(self);	/* reenable signals */
		}
	}
	return (self->ul_bindflags);
}
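
/*
 * Illustrative sketch (not part of the library): how a caller such as
 * ld.so.1 brackets its work with the guard interfaces above.  The
 * function name is hypothetical; the real rtld entry points and flag
 * handling (THR_FLG_NOLOCK, THR_FLG_REENTER) are more elaborate.
 */
#if 0
static void
rtld_binding_sketch(void)
{
	/* returns 1 only when the guard was newly entered */
	int entered = _ti_bind_guard(THR_FLG_RTLD);

	/* ... resolve the symbol, run .init code, etc. ... */

	if (entered)
		(void) _ti_bind_clear(THR_FLG_RTLD);
}
#endif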
/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thrp_setup() calls sigon().
 *
 * _sigoff() and _sigon() are external consolidation-private interfaces to
 * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	sigoff(curthread);
}

void
_sigon(void)
{
	sigon(curthread);
}

void
sigon(ulwp_t *self)
{
	int sig;

	ASSERT(self->ul_sigdefer > 0);
	if (--self->ul_sigdefer == 0) {
		if ((sig = self->ul_cursig) != 0 && self->ul_critical == 0) {
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
}

int
thr_getconcurrency()
{
	return (thr_concurrency);
}

int
pthread_getconcurrency()
{
	return (pthread_concurrency);
}

int
thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}
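
/*
 * Illustrative sketch (not part of the library): as the code above
 * shows, the concurrency level is range-checked, recorded, and simply
 * reported back by the matching get function.  Assumes an application
 * compiled with <pthread.h>; the function name is hypothetical.
 */
#if 0
#include <pthread.h>
#include <assert.h>

static void
concurrency_hint_sketch(void)
{
	/* EINVAL if negative, EAGAIN if greater than 65536 */
	if (pthread_setconcurrency(8) == 0)
		assert(pthread_getconcurrency() == 8);
}
#endif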
size_t
thr_min_stack(void)
{
	return (MINSTACK);
}

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}

/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int	suspendingallmutators;	/* when non-zero, suspending all mutators. */
int	suspendedallmutators;	/* when non-zero, all mutators suspended. */
int	mutatorsbarrier;	/* when non-zero, mutators barrier imposed. */
mutex_t	mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t	mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak _thr_getstate = thr_getstate
int
thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak _thr_setstate = thr_setstate
int
thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}
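
/*
 * Illustrative sketch (not part of the library): harvesting a stopped
 * thread's non-volatile registers and stack bounds with thr_getstate().
 * Per the code above, the target must be stopped (or all mutators must
 * be suspended); otherwise EINVAL and TRS_INVALID result.  The function
 * name is hypothetical.
 */
#if 0
static int
harvest_state_sketch(thread_t tid, gregset_t regs)
{
	stack_t ss;
	lwpid_t lwpid;
	int flag;
	int error;

	error = thr_getstate(tid, &flag, &lwpid, &ss, regs);
	if (error == 0 && flag == TRS_NONVOLATILE) {
		/* regs[] and the stack bounds in ss are now usable */
		return (0);
	}
	return (error? error : EINVAL);
}
#endif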
int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = __open(buf, O_RDONLY, 0)) >= 0) {
		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) __close(fd);
				return (0);
			}
			yield();	/* give him a chance to stop */
		}
		(void) __close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;	/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (__writev(fd, iov, 3) >= 0) {
			(void) __close(fd);
			return (0);
		}
		(void) __close(fd);
	}
	return (-1);
}

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
void
thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}

/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak _thr_setmutator = thr_setmutator
int
thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;
	int cancel_state;

	enabled = enabled? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
			    &cancel_state);
			while (mutatorsbarrier)
				(void) cond_wait(&mutatorscv, &mutatorslock);
			(void) pthread_setcancelstate(cancel_state, NULL);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak _thr_mutators_barrier = thr_mutators_barrier
void
thr_mutators_barrier(int enabled)
{
	int oldvalue;
	int cancel_state;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (mutatorsbarrier && enabled)
		(void) cond_wait(&mutatorscv, &mutatorslock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}
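
/*
 * Illustrative sketch (not part of the library): a thread registers
 * itself as a mutator, and a collector uses the barrier above to fence
 * out threads trying to become mutators while it works.  Both function
 * names are hypothetical.
 */
#if 0
static void
mutator_registration_sketch(void)
{
	(void) thr_setmutator(0, 1);	/* tid 0 means the calling thread */
	/* ... run application (mutator) code ... */
}

static void
collector_fence_sketch(void)
{
	thr_mutators_barrier(1);	/* block threads becoming mutators */
	/* ... work that must not race with new mutators ... */
	thr_mutators_barrier(0);	/* drop the barrier, wake waiters */
}
#endif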
/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended.  Actively running non-mutators remain
 * running.  Any other thread is suspended.
 */
#pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
int
thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak _thr_suspend_mutator = thr_suspend_mutator
int
thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak _thr_continue_allmutators = thr_continue_allmutators
int
thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}
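
/*
 * Illustrative sketch (not part of the library): a stop-the-world pass
 * built from the two interfaces above.  Only mutators are stopped;
 * actively running non-mutators are left alone.  The function name is
 * hypothetical.
 */
#if 0
static void
stop_the_world_sketch(void)
{
	if (thr_suspend_allmutators() != 0)
		return;		/* EINVAL: a suspension is already active */

	/* ... scan the suspended mutators' stacks and registers ... */

	(void) thr_continue_allmutators();
}
#endif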
/*
 * Resume a suspended mutator.
 */
#pragma weak _thr_continue_mutator = thr_continue_mutator
int
thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}

#pragma weak _thr_wait_mutator = thr_wait_mutator
int
thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int cancel_state;
	int error = 0;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
top:
	if ((ulwp = find_lwp(tid)) == NULL) {
		(void) pthread_setcancelstate(cancel_state, NULL);
		return (ESRCH);
	}

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	(void) pthread_setcancelstate(cancel_state, NULL);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */