/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017 by The MathWorks, Inc. All rights reserved.
 */
/*
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <pthread.h>
#include <procfs.h>
#include <sys/uio.h>
#include <ctype.h>
#include "libc.h"

/*
 * These symbols should not be exported from libc, but
 * /lib/libm.so.2 references _thr_main. libm needs to be fixed.
 * Also, some older versions of the Studio compiler/debugger
 * components reference them. These need to be fixed, too.
 */
#pragma weak _thr_main = thr_main
#pragma weak _thr_create = thr_create
#pragma weak _thr_join = thr_join
#pragma weak _thr_self = thr_self

#undef errno
extern int errno;

/*
 * Between Solaris 2.5 and Solaris 9, __threaded was used to indicate
 * "we are linked with libthread". The Sun Workshop 6 update 1 compilation
 * system used it illegally (it is a consolidation private symbol).
 * To accommodate this and possibly other abusers of the symbol,
 * we make it always equal to 1 now that libthread has been folded
 * into libc. The new __libc_threaded symbol is used to indicate
 * the new meaning, "more than one thread exists".
 */
int __threaded = 1;		/* always equal to 1 */
int __libc_threaded = 0;	/* zero until first thr_create() */

/*
 * thr_concurrency and pthread_concurrency are not used by the library.
 * They exist solely to hold and return the values set by calls to
 * thr_setconcurrency() and pthread_setconcurrency().
 * Because thr_concurrency is affected by the THR_NEW_LWP flag
 * to thr_create(), thr_concurrency is protected by link_lock.
 */
static int thr_concurrency = 1;
static int pthread_concurrency;
#define	HASHTBLSZ	1024	/* must be a power of two */
#define	TIDHASH(tid, udp)	((tid) & (udp)->hash_mask)
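
/*
 * Illustrative example: once finish_init() installs the full table
 * (hash_size == HASHTBLSZ, hash_mask == HASHTBLSZ - 1 == 1023),
 * TIDHASH(1025, udp) == (1025 & 1023) == 1, so lwpid 1025 lands in
 * bucket 1. Until then, hash_mask is 0 and every tid maps to the
 * single init_hash_table bucket below.
 */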

/* initial allocation, just enough for one lwp */
#pragma align 64(init_hash_table)
thr_hash_table_t init_hash_table[1] = {
	{ DEFAULTMUTEX, DEFAULTCV, NULL },
};

extern const Lc_interface rtld_funcs[];

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _uberdata = __uberdata
uberdata_t __uberdata = {
	{ DEFAULTMUTEX, NULL, 0 },	/* link_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* ld_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* fork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* atfork_lock */
	{ RECURSIVEMUTEX, NULL, 0 },	/* callout_lock */
	{ DEFAULTMUTEX, NULL, 0 },	/* tdb_hash_lock */
	{ 0, },				/* tdb_hash_lock_stats */
	{ { 0 }, },			/* siguaction[NSIG] */
	{{ DEFAULTMUTEX, NULL, 0 },	/* bucket[NBUCKETS] */
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 },
	{ DEFAULTMUTEX, NULL, 0 }},
	{ RECURSIVEMUTEX, NULL, NULL },		/* atexit_root */
	{ DEFAULTMUTEX, 0, 0, NULL },		/* tsd_metadata */
	{ DEFAULTMUTEX, {0, 0}, {0, 0} },	/* tls_metadata */
	0,			/* primary_map */
	0,			/* bucket_init */
	0,			/* pad[0] */
	0,			/* pad[1] */
	{ 0 },			/* uberflags */
	NULL,			/* queue_head */
	init_hash_table,	/* thr_hash_table */
	1,			/* hash_size: size of the hash table */
	0,			/* hash_mask: hash_size - 1 */
	NULL,			/* ulwp_one */
	NULL,			/* all_lwps */
	NULL,			/* all_zombies */
	0,			/* nthreads */
	0,			/* nzombies */
	0,			/* ndaemons */
	0,			/* pid */
	sigacthandler,		/* sigacthandler */
	NULL,			/* lwp_stacks */
	NULL,			/* lwp_laststack */
	0,			/* nfreestack */
	10,			/* thread_stack_cache */
	NULL,			/* ulwp_freelist */
	NULL,			/* ulwp_lastfree */
	NULL,			/* ulwp_replace_free */
	NULL,			/* ulwp_replace_last */
	NULL,			/* atforklist */
	NULL,			/* robustlocks */
	NULL,			/* robustlist */
	NULL,			/* progname */
	NULL,			/* ub_comm_page */
	NULL,			/* __tdb_bootstrap */
	{			/* tdb */
		NULL,		/* tdb_sync_addr_hash */
		0,		/* tdb_register_count */
		0,		/* tdb_hash_alloc_failed */
		NULL,		/* tdb_sync_addr_free */
		NULL,		/* tdb_sync_addr_last */
		0,		/* tdb_sync_alloc */
		{ 0, 0 },	/* tdb_ev_global_mask */
		tdb_events,	/* tdb_events array */
	},
};

/*
 * The weak version is known to libc_db and mdb.
 */
#pragma weak _tdb_bootstrap = __tdb_bootstrap
uberdata_t **__tdb_bootstrap = NULL;

int thread_queue_fifo = 4;
int thread_queue_dump = 0;
int thread_cond_wait_defer = 0;
int thread_error_detection = 0;
int thread_async_safe = 0;
int thread_stack_cache = 10;
int thread_door_noreserve = 0;
int thread_locks_misaligned = 0;

static ulwp_t *ulwp_alloc(void);
static void ulwp_free(ulwp_t *);

/*
 * Insert the lwp into the hash table.
 */
void
hash_in_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp->ul_hash = udp->thr_hash_table[ix].hash_bucket;
	udp->thr_hash_table[ix].hash_bucket = ulwp;
	ulwp->ul_ix = ix;
}

void
hash_in(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix = TIDHASH(ulwp->ul_lwpid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

	lmutex_lock(mp);
	hash_in_unlocked(ulwp, ix, udp);
	lmutex_unlock(mp);
}

/*
 * Delete the lwp from the hash table.
 */
void
hash_out_unlocked(ulwp_t *ulwp, int ix, uberdata_t *udp)
{
	ulwp_t **ulwpp;

	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    ulwp != *ulwpp;
	    ulwpp = &(*ulwpp)->ul_hash)
		;
	*ulwpp = ulwp->ul_hash;
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
}

void
hash_out(ulwp_t *ulwp, uberdata_t *udp)
{
	int ix;

	if ((ix = ulwp->ul_ix) >= 0) {
		mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;

		lmutex_lock(mp);
		hash_out_unlocked(ulwp, ix, udp);
		lmutex_unlock(mp);
	}
}

/*
 * Retain stack information for thread structures that are being recycled for
 * new threads. All other members of the thread structure should be zeroed.
 */
static void
ulwp_clean(ulwp_t *ulwp)
{
	caddr_t stk = ulwp->ul_stk;
	size_t mapsiz = ulwp->ul_mapsiz;
	size_t guardsize = ulwp->ul_guardsize;
	uintptr_t stktop = ulwp->ul_stktop;
	size_t stksiz = ulwp->ul_stksiz;

	(void) memset(ulwp, 0, sizeof (*ulwp));

	ulwp->ul_stk = stk;
	ulwp->ul_mapsiz = mapsiz;
	ulwp->ul_guardsize = guardsize;
	ulwp->ul_stktop = stktop;
	ulwp->ul_stksiz = stksiz;
}

static int stackprot;

/*
 * Answer the question, "Is the lwp in question really dead?"
 * We must inquire of the operating system to be really sure
 * because the lwp may have called lwp_exit() but it has not
 * yet completed the exit.
 */
static int
dead_and_buried(ulwp_t *ulwp)
{
	if (ulwp->ul_lwpid == (lwpid_t)(-1))
		return (1);
	if (ulwp->ul_dead && ulwp->ul_detached &&
	    _lwp_kill(ulwp->ul_lwpid, 0) == ESRCH) {
		ulwp->ul_lwpid = (lwpid_t)(-1);
		return (1);
	}
	return (0);
}

/*
 * Attempt to keep the stack cache within the specified cache limit.
 */
static void
trim_stack_cache(int cache_limit)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *prev = NULL;
	ulwp_t **ulwpp = &udp->lwp_stacks;
	ulwp_t *ulwp;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, self));

	while (udp->nfreestack > cache_limit && (ulwp = *ulwpp) != NULL) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			(void) munmap(ulwp->ul_stk, ulwp->ul_mapsiz);
			/*
			 * Now put the free ulwp on the ulwp freelist.
			 */
			ulwp->ul_mapsiz = 0;
			ulwp->ul_next = NULL;
			if (udp->ulwp_freelist == NULL)
				udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
			else {
				udp->ulwp_lastfree->ul_next = ulwp;
				udp->ulwp_lastfree = ulwp;
			}
		} else {
			prev = ulwp;
			ulwpp = &ulwp->ul_next;
		}
	}
}

/*
 * Find an unused stack of the requested size
 * or create a new stack of the requested size.
 * Return a pointer to the ulwp_t structure referring to the stack, or NULL.
 * thr_exit() stores 1 in the ul_dead member.
 * thr_join() stores -1 in the ul_lwpid member.
 */
static ulwp_t *
find_stack(size_t stksize, size_t guardsize)
{
	static size_t pagesize = 0;

	uberdata_t *udp = curthread->ul_uberdata;
	size_t mapsize;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	void *stk;

	/*
	 * The stack is allocated PROT_READ|PROT_WRITE|PROT_EXEC
	 * unless overridden by the system's configuration.
	 */
	if (stackprot == 0) {	/* do this once */
		long lprot = _sysconf(_SC_STACK_PROT);
		if (lprot <= 0)
			lprot = (PROT_READ|PROT_WRITE|PROT_EXEC);
		stackprot = (int)lprot;
	}
	if (pagesize == 0)	/* do this once */
		pagesize = _sysconf(_SC_PAGESIZE);

	/*
	 * One megabyte stacks by default, but subtract off
	 * two pages for the system-created red zones.
	 * Round up a non-zero stack size to a pagesize multiple.
	 */
	if (stksize == 0)
		stksize = DEFAULTSTACK - 2 * pagesize;
	else
		stksize = ((stksize + pagesize - 1) & -pagesize);

	/*
	 * Round up the mapping size to a multiple of pagesize.
	 * Note: mmap() provides at least one page of red zone
	 * so we deduct that from the value of guardsize.
	 */
	if (guardsize != 0)
		guardsize = ((guardsize + pagesize - 1) & -pagesize) - pagesize;
	mapsize = stksize + guardsize;
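
	/*
	 * Worked example (illustrative only, assuming a 4K pagesize):
	 * a request for stksize 5000 with guardsize 4096 rounds stksize
	 * up to 8192; guardsize rounds to 4096 and then has one page
	 * deducted (the red zone mmap() already provides), leaving 0,
	 * so mapsize == 8192. A guardsize request of 8192 leaves one
	 * explicit guard page and mapsize == 12288.
	 */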

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->lwp_stacks;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (ulwp->ul_mapsiz == mapsize &&
		    ulwp->ul_guardsize == guardsize &&
		    dead_and_buried(ulwp)) {
			/*
			 * The previous lwp is gone; reuse the stack.
			 * Remove the ulwp from the stack list.
			 */
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->lwp_laststack)
				udp->lwp_laststack = prev;
			hash_out(ulwp, udp);
			udp->nfreestack--;
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}

	/*
	 * None of the cached stacks matched our mapping size.
	 * Reduce the stack cache to get rid of possibly
	 * very old stacks that will never be reused.
	 */
	if (udp->nfreestack > udp->thread_stack_cache)
		trim_stack_cache(udp->thread_stack_cache);
	else if (udp->nfreestack > 0)
		trim_stack_cache(udp->nfreestack - 1);
	lmutex_unlock(&udp->link_lock);

	/*
	 * Create a new stack.
	 */
	if ((stk = mmap(NULL, mapsize, stackprot,
	    MAP_PRIVATE|MAP_NORESERVE|MAP_ANON, -1, (off_t)0)) != MAP_FAILED) {
		/*
		 * We have allocated our stack. Now allocate the ulwp.
		 */
		ulwp = ulwp_alloc();
		if (ulwp == NULL)
			(void) munmap(stk, mapsize);
		else {
			ulwp->ul_stk = stk;
			ulwp->ul_mapsiz = mapsize;
			ulwp->ul_guardsize = guardsize;
			ulwp->ul_stktop = (uintptr_t)stk + mapsize;
			ulwp->ul_stksiz = stksize;
			if (guardsize)	/* protect the extra red zone */
				(void) mprotect(stk, guardsize, PROT_NONE);
		}
	}
	return (ulwp);
}

/*
 * Get a ulwp_t structure from the free list or allocate a new one.
 * Such ulwp_t's do not have a stack allocated by the library.
 */
static ulwp_t *
ulwp_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	size_t tls_size;
	ulwp_t *prev;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	caddr_t data;

	lmutex_lock(&udp->link_lock);
	for (prev = NULL, ulwpp = &udp->ulwp_freelist;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_next) {
		if (dead_and_buried(ulwp)) {
			*ulwpp = ulwp->ul_next;
			ulwp->ul_next = NULL;
			if (ulwp == udp->ulwp_lastfree)
				udp->ulwp_lastfree = prev;
			hash_out(ulwp, udp);
			lmutex_unlock(&udp->link_lock);
			ulwp_clean(ulwp);
			return (ulwp);
		}
	}
	lmutex_unlock(&udp->link_lock);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	data = lmalloc(sizeof (*ulwp) + tls_size);
	if (data != NULL) {
		/* LINTED pointer cast may result in improper alignment */
		ulwp = (ulwp_t *)(data + tls_size);
	}
	return (ulwp);
}
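
/*
 * Layout of the lmalloc() block in ulwp_alloc() above (sketch):
 *
 *	data                      data + tls_size
 *	|<----- static TLS ----->|<------ ulwp_t ------>|
 *
 * The thread's static TLS block sits immediately below its ulwp_t,
 * mirroring the allocation made for the main thread in libc_init().
 */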

/*
 * Free a ulwp structure.
 * If there is an associated stack, put it on the stack list and
 * munmap() previously freed stacks up to the residual cache limit.
 * Else put it on the ulwp free list and never call lfree() on it.
 */
static void
ulwp_free(ulwp_t *ulwp)
{
	uberdata_t *udp = curthread->ul_uberdata;

	ASSERT(udp->nthreads <= 1 || MUTEX_OWNED(&udp->link_lock, curthread));
	ulwp->ul_next = NULL;
	if (ulwp == udp->ulwp_one)	/* don't reuse the primordial stack */
		/*EMPTY*/;
	else if (ulwp->ul_mapsiz != 0) {
		if (udp->lwp_stacks == NULL)
			udp->lwp_stacks = udp->lwp_laststack = ulwp;
		else {
			udp->lwp_laststack->ul_next = ulwp;
			udp->lwp_laststack = ulwp;
		}
		if (++udp->nfreestack > udp->thread_stack_cache)
			trim_stack_cache(udp->thread_stack_cache);
	} else {
		if (udp->ulwp_freelist == NULL)
			udp->ulwp_freelist = udp->ulwp_lastfree = ulwp;
		else {
			udp->ulwp_lastfree->ul_next = ulwp;
			udp->ulwp_lastfree = ulwp;
		}
	}
}

/*
 * Find a named lwp and return a pointer to its hash list location.
 * On success, returns with the hash lock held.
 */
ulwp_t **
find_lwpp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int ix = TIDHASH(tid, udp);
	mutex_t *mp = &udp->thr_hash_table[ix].hash_lock;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;

	if (tid == 0)
		return (NULL);

	lmutex_lock(mp);
	for (ulwpp = &udp->thr_hash_table[ix].hash_bucket;
	    (ulwp = *ulwpp) != NULL;
	    ulwpp = &ulwp->ul_hash) {
		if (ulwp->ul_lwpid == tid)
			return (ulwpp);
	}
	lmutex_unlock(mp);
	return (NULL);
}

/*
 * Wake up all lwps waiting on this lwp for some reason.
 */
void
ulwp_broadcast(ulwp_t *ulwp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
	(void) cond_broadcast(ulwp_condvar(ulwp, udp));
}

/*
 * Find a named lwp and return a pointer to it.
 * Returns with the hash lock held.
 */
ulwp_t *
find_lwp(thread_t tid)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp = NULL;
	ulwp_t **ulwpp;

	if (self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	}

	if (ulwp && ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		ulwp = NULL;
	}

	return (ulwp);
}

int
_thrp_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
    long flags, thread_t *new_thread, size_t guardsize)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ucontext_t uc;
	uint_t lwp_flags;
	thread_t tid;
	int error;
	ulwp_t *ulwp;

	/*
	 * Enforce the restriction of not creating any threads
	 * until the primary link map has been initialized.
	 * Also, disallow thread creation to a child of vfork().
	 */
	if (!self->ul_primarymap || self->ul_vfork)
		return (ENOTSUP);

	if (udp->hash_size == 1)
		finish_init();

	if ((stk || stksize) && stksize < MINSTACK)
		return (EINVAL);

	if (stk == NULL) {
		if ((ulwp = find_stack(stksize, guardsize)) == NULL)
			return (ENOMEM);
		stksize = ulwp->ul_mapsiz - ulwp->ul_guardsize;
	} else {
		/* initialize the private stack */
		if ((ulwp = ulwp_alloc()) == NULL)
			return (ENOMEM);
		ulwp->ul_stk = stk;
		ulwp->ul_stktop = (uintptr_t)stk + stksize;
		ulwp->ul_stksiz = stksize;
	}
	/* ulwp is not in the hash table; make sure hash_out() doesn't fail */
	ulwp->ul_ix = -1;
	ulwp->ul_errnop = &ulwp->ul_errno;

	lwp_flags = LWP_SUSPENDED;
	if (flags & (THR_DETACHED|THR_DAEMON)) {
		flags |= THR_DETACHED;
		lwp_flags |= LWP_DETACHED;
	}
	if (flags & THR_DAEMON)
		lwp_flags |= LWP_DAEMON;

	/* creating a thread: enforce mt-correctness in mutex_lock() */
	self->ul_async_safe = 1;

	/* per-thread copies of global variables, for speed */
	ulwp->ul_queue_fifo = self->ul_queue_fifo;
	ulwp->ul_cond_wait_defer = self->ul_cond_wait_defer;
	ulwp->ul_error_detection = self->ul_error_detection;
	ulwp->ul_async_safe = self->ul_async_safe;
	ulwp->ul_max_spinners = self->ul_max_spinners;
	ulwp->ul_adaptive_spin = self->ul_adaptive_spin;
	ulwp->ul_queue_spin = self->ul_queue_spin;
	ulwp->ul_door_noreserve = self->ul_door_noreserve;
	ulwp->ul_misaligned = self->ul_misaligned;

	/* new thread inherits creating thread's scheduling parameters */
	ulwp->ul_policy = self->ul_policy;
	ulwp->ul_pri = (self->ul_epri? self->ul_epri : self->ul_pri);
	ulwp->ul_cid = self->ul_cid;
	ulwp->ul_rtclassid = self->ul_rtclassid;

	ulwp->ul_primarymap = self->ul_primarymap;
	ulwp->ul_self = ulwp;
	ulwp->ul_uberdata = udp;

	/* debugger support */
	ulwp->ul_usropts = flags;

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider. When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	ulwp->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	ulwp->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	ulwp->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	ulwp->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	ulwp->ul_startpc = func;
	ulwp->ul_startarg = arg;
	_fpinherit(ulwp);
	/*
	 * Defer signals on the new thread until its TLS constructors
	 * have been called. _thrp_setup() will call sigon() after
	 * it has called tls_setup().
	 */
	ulwp->ul_sigdefer = 1;

	error = setup_context(&uc, _thrp_setup, ulwp,
	    (caddr_t)ulwp->ul_stk + ulwp->ul_guardsize, stksize);
	if (error != 0 && stk != NULL)	/* inaccessible stack */
		error = EFAULT;

	/*
	 * Call enter_critical() to avoid being suspended until we
	 * have linked the new thread into the proper lists.
	 * This is necessary because forkall() and fork1() must
	 * suspend all threads and they must see a complete list.
	 */
	enter_critical(self);
	uc.uc_sigmask = ulwp->ul_sigmask = self->ul_sigmask;
	if (error != 0 ||
	    (error = __lwp_create(&uc, lwp_flags, &tid)) != 0) {
		exit_critical(self);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_dead = 1;
		ulwp->ul_detached = 1;
		lmutex_lock(&udp->link_lock);
		ulwp_free(ulwp);
		lmutex_unlock(&udp->link_lock);
		return (error);
	}
	self->ul_nocancel = 0;	/* cancellation is now possible */
	udp->uberflags.uf_mt = 1;
	if (new_thread)
		*new_thread = tid;
	if (flags & THR_DETACHED)
		ulwp->ul_detached = 1;
	ulwp->ul_lwpid = tid;
	ulwp->ul_stop = TSTP_REGULAR;
	if (flags & THR_SUSPENDED)
		ulwp->ul_created = 1;

	lmutex_lock(&udp->link_lock);
	ulwp->ul_forw = udp->all_lwps;
	ulwp->ul_back = udp->all_lwps->ul_back;
	ulwp->ul_back->ul_forw = ulwp;
	ulwp->ul_forw->ul_back = ulwp;
	hash_in(ulwp, udp);
	udp->nthreads++;
	if (flags & THR_DAEMON)
		udp->ndaemons++;
	if (flags & THR_NEW_LWP)
		thr_concurrency++;
	__libc_threaded = 1;	/* inform stdio */
	lmutex_unlock(&udp->link_lock);

	if (__td_event_report(self, TD_CREATE, udp)) {
		self->ul_td_evbuf.eventnum = TD_CREATE;
		self->ul_td_evbuf.eventdata = (void *)(uintptr_t)tid;
		tdb_event(TD_CREATE, udp);
	}

	exit_critical(self);

	if (!(flags & THR_SUSPENDED))
		(void) _thrp_continue(tid, TSTP_REGULAR);

	return (0);
}

int
thr_create(void *stk, size_t stksize, void *(*func)(void *), void *arg,
    long flags, thread_t *new_thread)
{
	return (_thrp_create(stk, stksize, func, arg, flags, new_thread, 0));
}
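
/*
 * Illustrative call (not part of libc), assuming an application
 * start routine "worker":
 *
 *	thread_t tid;
 *	int error = thr_create(NULL, 0, worker, arg,
 *	    THR_DETACHED | THR_DAEMON, &tid);
 *
 * A NULL stk with stksize 0 gets a default-sized stack from
 * find_stack(); THR_DAEMON implies THR_DETACHED, as enforced in
 * _thrp_create() above.
 */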

/*
 * A special cancellation cleanup hook for DCE.
 * cleanuphndlr, when it is not NULL, will contain a callback
 * function to be called before a thread is terminated in
 * thr_exit() as a result of being cancelled.
 */
static void (*cleanuphndlr)(void) = NULL;

/*
 * _pthread_setcleanupinit: sets the cleanup hook.
 */
int
_pthread_setcleanupinit(void (*func)(void))
{
	cleanuphndlr = func;
	return (0);
}
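
/*
 * Illustrative usage (sketch, not part of libc): a DCE runtime would
 * register its hook once at initialization,
 *
 *	extern void dce_cancel_cleanup(void);
 *	(void) _pthread_setcleanupinit(dce_cancel_cleanup);
 *
 * after which _thrp_exit_common() invokes the hook whenever a thread
 * terminates by being cancelled.
 */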

void
_thrp_exit()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *replace = NULL;

	if (__td_event_report(self, TD_DEATH, udp)) {
		self->ul_td_evbuf.eventnum = TD_DEATH;
		tdb_event(TD_DEATH, udp);
	}

	ASSERT(self->ul_sigdefer != 0);

	lmutex_lock(&udp->link_lock);
	udp->nthreads--;
	if (self->ul_usropts & THR_NEW_LWP)
		thr_concurrency--;
	if (self->ul_usropts & THR_DAEMON)
		udp->ndaemons--;
	else if (udp->nthreads == udp->ndaemons) {
		/*
		 * We are the last non-daemon thread exiting.
		 * Exit the process. We retain our TSD and TLS so
		 * that atexit() application functions can use them.
		 */
		lmutex_unlock(&udp->link_lock);
		exit(0);
		thr_panic("_thrp_exit(): exit(0) returned");
	}
	lmutex_unlock(&udp->link_lock);

	/*
	 * tsd_exit() may call its destructor free(), thus depending on
	 * tmem, therefore tmem_exit() needs to be called after tsd_exit()
	 * and tls_exit().
	 */
	tsd_exit();		/* deallocate thread-specific data */
	tls_exit();		/* deallocate thread-local storage */
	tmem_exit();		/* deallocate tmem allocations */
	heldlock_exit();	/* deal with left-over held locks */

	/* block all signals to finish exiting */
	block_all_signals(self);
	/* also prevent ourself from being suspended */
	enter_critical(self);
	rwl_free(self);
	lmutex_lock(&udp->link_lock);
	ulwp_free(self);
	(void) ulwp_lock(self, udp);

	if (self->ul_mapsiz && !self->ul_detached) {
		/*
		 * We want to free the stack for reuse but must keep
		 * the ulwp_t struct for the benefit of thr_join().
		 * For this purpose we allocate a replacement ulwp_t.
		 */
		if ((replace = udp->ulwp_replace_free) == NULL)
			replace = lmalloc(REPLACEMENT_SIZE);
		else if ((udp->ulwp_replace_free = replace->ul_next) == NULL)
			udp->ulwp_replace_last = NULL;
	}

	if (udp->all_lwps == self)
		udp->all_lwps = self->ul_forw;
	if (udp->all_lwps == self)
		udp->all_lwps = NULL;
	else {
		self->ul_forw->ul_back = self->ul_back;
		self->ul_back->ul_forw = self->ul_forw;
	}
	self->ul_forw = self->ul_back = NULL;
#if defined(THREAD_DEBUG)
	/* collect queue lock statistics before marking ourself dead */
	record_spin_locks(self);
#endif
	self->ul_dead = 1;
	self->ul_pleasestop = 0;
	if (replace != NULL) {
		int ix = self->ul_ix;		/* the hash index */
		(void) memcpy(replace, self, REPLACEMENT_SIZE);
		replace->ul_self = replace;
		replace->ul_next = NULL;	/* clone not on stack list */
		replace->ul_mapsiz = 0;		/* allows clone to be freed */
		replace->ul_replace = 1;	/* requires clone to be freed */
		hash_out_unlocked(self, ix, udp);
		hash_in_unlocked(replace, ix, udp);
		ASSERT(!(self->ul_detached));
		self->ul_detached = 1;		/* this frees the stack */
		self->ul_schedctl = NULL;
		self->ul_schedctl_called = &udp->uberflags;
		set_curthread(self = replace);
		/*
		 * Having just changed the address of curthread, we
		 * must reset the ownership of the locks we hold so
		 * that assertions will not fire when we release them.
		 */
		udp->link_lock.mutex_owner = (uintptr_t)self;
		ulwp_mutex(self, udp)->mutex_owner = (uintptr_t)self;
		/*
		 * NOTE:
		 * On i386, %gs still references the original, not the
		 * replacement, ulwp structure. Fetching the replacement
		 * curthread pointer via %gs:0 works correctly since the
		 * original ulwp structure will not be reallocated until
		 * this lwp has completed its lwp_exit() system call (see
		 * dead_and_buried()), but from here on out, we must make
		 * no references to %gs:<offset> other than %gs:0.
		 */
	}
	/*
	 * Put non-detached terminated threads in the all_zombies list.
	 */
	if (!self->ul_detached) {
		udp->nzombies++;
		if (udp->all_zombies == NULL) {
			ASSERT(udp->nzombies == 1);
			udp->all_zombies = self->ul_forw = self->ul_back = self;
		} else {
			self->ul_forw = udp->all_zombies;
			self->ul_back = udp->all_zombies->ul_back;
			self->ul_back->ul_forw = self;
			self->ul_forw->ul_back = self;
		}
	}
	/*
	 * Notify everyone waiting for this thread.
	 */
	ulwp_broadcast(self);
	(void) ulwp_unlock(self, udp);
	/*
	 * Prevent any more references to the schedctl data.
	 * We are exiting and continue_fork() may not find us.
	 * Do this just before dropping link_lock, since fork
	 * serializes on link_lock.
	 */
	self->ul_schedctl = NULL;
	self->ul_schedctl_called = &udp->uberflags;
	lmutex_unlock(&udp->link_lock);

	ASSERT(self->ul_critical == 1);
	ASSERT(self->ul_preempt == 0);
	_lwp_terminate();	/* never returns */
	thr_panic("_thrp_exit(): _lwp_terminate() returned");
}

#if defined(THREAD_DEBUG)
void
collect_queue_statistics()
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;

	if (thread_queue_dump) {
		lmutex_lock(&udp->link_lock);
		if ((ulwp = udp->all_lwps) != NULL) {
			do {
				record_spin_locks(ulwp);
			} while ((ulwp = ulwp->ul_forw) != udp->all_lwps);
		}
		lmutex_unlock(&udp->link_lock);
	}
}
#endif

static void __NORETURN
_thrp_exit_common(void *status, int unwind)
{
	ulwp_t *self = curthread;
	int cancelled = (self->ul_cancel_pending && status == PTHREAD_CANCELED);

	ASSERT(self->ul_critical == 0 && self->ul_preempt == 0);

	/*
	 * Disable cancellation and call the special DCE cancellation
	 * cleanup hook if it is enabled. Do nothing else before calling
	 * the DCE cancellation cleanup hook; it may call longjmp() and
	 * never return here.
	 */
	self->ul_cancel_disabled = 1;
	self->ul_cancel_async = 0;
	self->ul_save_async = 0;
	self->ul_cancelable = 0;
	self->ul_cancel_pending = 0;
	set_cancel_pending_flag(self, 1);
	if (cancelled && cleanuphndlr != NULL)
		(*cleanuphndlr)();

	/*
	 * Block application signals while we are exiting.
	 * We call out to C++, TSD, and TLS destructors while exiting
	 * and these are application-defined, so we cannot be assured
	 * that they won't reset the signal mask. We use sigoff() to
	 * defer any signals that may be received as a result of this
	 * bad behavior. Such signals will be lost to the process
	 * when the thread finishes exiting.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &maskset, NULL);
	sigoff(self);

	self->ul_rval = status;

	/*
	 * If thr_exit is being called from the places where
	 * C++ destructors are to be called such as cancellation
	 * points, then set this flag. It is checked in _t_cancel()
	 * to decide whether _ex_unwind() is to be called or not.
	 */
	if (unwind)
		self->ul_unwind = 1;

	/*
	 * _thrp_unwind() will eventually call _thrp_exit().
	 * It never returns.
	 */
	_thrp_unwind(NULL);
	thr_panic("_thrp_exit_common(): _thrp_unwind() returned");

	for (;;)	/* to shut the compiler up about __NORETURN */
		continue;
}

/*
 * Called when a thread returns from its start function.
 * We are at the top of the stack; no unwinding is necessary.
 */
void
_thrp_terminate(void *status)
{
	_thrp_exit_common(status, 0);
}

#pragma weak pthread_exit = thr_exit
#pragma weak _thr_exit = thr_exit
void
thr_exit(void *status)
{
	_thrp_exit_common(status, 1);
}

int
_thrp_join(thread_t tid, thread_t *departed, void **status, int do_cancel)
{
	uberdata_t *udp = curthread->ul_uberdata;
	mutex_t *mp;
	void *rval;
	thread_t found;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int replace;
	int error;

	if (do_cancel)
		error = lwp_wait(tid, &found);
	else {
		while ((error = __lwp_wait(tid, &found)) == EINTR)
			;
	}
	if (error)
		return (error);

	/*
	 * We must hold link_lock to avoid a race condition with find_stack().
	 */
	lmutex_lock(&udp->link_lock);
	if ((ulwpp = find_lwpp(found)) == NULL) {
		/*
		 * lwp_wait() found an lwp that the library doesn't know
		 * about. It must have been created with _lwp_create().
		 * Just return its lwpid; we can't know its status.
		 */
		lmutex_unlock(&udp->link_lock);
		rval = NULL;
	} else {
		/*
		 * Remove ulwp from the hash table.
		 */
		ulwp = *ulwpp;
		*ulwpp = ulwp->ul_hash;
		ulwp->ul_hash = NULL;
		/*
		 * Remove ulwp from all_zombies list.
		 */
		ASSERT(udp->nzombies >= 1);
		if (udp->all_zombies == ulwp)
			udp->all_zombies = ulwp->ul_forw;
		if (udp->all_zombies == ulwp)
			udp->all_zombies = NULL;
		else {
			ulwp->ul_forw->ul_back = ulwp->ul_back;
			ulwp->ul_back->ul_forw = ulwp->ul_forw;
		}
		ulwp->ul_forw = ulwp->ul_back = NULL;
		udp->nzombies--;
		ASSERT(ulwp->ul_dead && !ulwp->ul_detached &&
		    !(ulwp->ul_usropts & (THR_DETACHED|THR_DAEMON)));
		/*
		 * We can't call ulwp_unlock(ulwp) after we set
		 * ulwp->ul_ix = -1 so we have to get a pointer to the
		 * ulwp's hash table mutex now in order to unlock it below.
		 */
		mp = ulwp_mutex(ulwp, udp);
		ulwp->ul_lwpid = (lwpid_t)(-1);
		ulwp->ul_ix = -1;
		rval = ulwp->ul_rval;
		replace = ulwp->ul_replace;
		lmutex_unlock(mp);
		if (replace) {
			ulwp->ul_next = NULL;
			if (udp->ulwp_replace_free == NULL)
				udp->ulwp_replace_free =
				    udp->ulwp_replace_last = ulwp;
			else {
				udp->ulwp_replace_last->ul_next = ulwp;
				udp->ulwp_replace_last = ulwp;
			}
		}
		lmutex_unlock(&udp->link_lock);
	}

	if (departed != NULL)
		*departed = found;
	if (status != NULL)
		*status = rval;
	return (0);
}

int
thr_join(thread_t tid, thread_t *departed, void **status)
{
	int error = _thrp_join(tid, departed, status, 1);
	return ((error == EINVAL)? ESRCH : error);
}

/*
 * pthread_join() differs from Solaris thr_join():
 * It does not return the departed thread's id
 * and hence does not have a "departed" argument.
 * It returns EINVAL if tid refers to a detached thread.
 */
#pragma weak _pthread_join = pthread_join
int
pthread_join(pthread_t tid, void **status)
{
	return ((tid == 0)? ESRCH : _thrp_join(tid, NULL, status, 1));
}
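
/*
 * Illustrative sketch (not part of libc) of the two join flavors,
 * assuming tid was returned by thr_create()/pthread_create():
 *
 *	void *rval;
 *	thread_t who;
 *	(void) thr_join(0, &who, &rval);	-- join any joinable thread
 *	(void) pthread_join(tid, &rval);	-- join one specific thread
 *
 * thr_join() with tid 0 waits for any non-detached thread to exit and
 * reports which one in 'who'; pthread_join() takes a specific tid and,
 * per the comment above, has no "departed" argument.
 */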

int
pthread_detach(pthread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t **ulwpp;
	int error = 0;

	if ((ulwpp = find_lwpp(tid)) == NULL)
		return (ESRCH);
	ulwp = *ulwpp;

	if (ulwp->ul_dead) {
		ulwp_unlock(ulwp, udp);
		error = _thrp_join(tid, NULL, NULL, 0);
	} else {
		error = __lwp_detach(tid);
		ulwp->ul_detached = 1;
		ulwp->ul_usropts |= THR_DETACHED;
		ulwp_unlock(ulwp, udp);
	}
	return (error);
}

static const char *
ematch(const char *ev, const char *match)
{
	int c;

	while ((c = *match++) != '\0') {
		if (*ev++ != c)
			return (NULL);
	}
	if (*ev++ != '=')
		return (NULL);
	return (ev);
}

static int
envvar(const char *ev, const char *match, int limit)
{
	int val = -1;
	const char *ename;

	if ((ename = ematch(ev, match)) != NULL) {
		int c;
		for (val = 0; (c = *ename) != '\0'; ename++) {
			if (!isdigit(c)) {
				val = -1;
				break;
			}
			val = val * 10 + (c - '0');
			if (val > limit) {
				val = limit;
				break;
			}
		}
	}
	return (val);
}
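
/*
 * Example (illustrative): given the environment string
 * "STACK_CACHE=90000", ematch("STACK_CACHE=90000", "STACK_CACHE")
 * returns a pointer to "90000", and envvar(ev, "STACK_CACHE", 10000)
 * parses the digits, clamping the result to the limit, so it returns
 * 10000. A non-numeric value such as "STACK_CACHE=lots" yields -1
 * and is ignored by etest() below.
 */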

static void
etest(const char *ev)
{
	int value;

	if ((value = envvar(ev, "QUEUE_SPIN", 1000000)) >= 0)
		thread_queue_spin = value;
	if ((value = envvar(ev, "ADAPTIVE_SPIN", 1000000)) >= 0)
		thread_adaptive_spin = value;
	if ((value = envvar(ev, "MAX_SPINNERS", 255)) >= 0)
		thread_max_spinners = value;
	if ((value = envvar(ev, "QUEUE_FIFO", 8)) >= 0)
		thread_queue_fifo = value;
#if defined(THREAD_DEBUG)
	if ((value = envvar(ev, "QUEUE_VERIFY", 1)) >= 0)
		thread_queue_verify = value;
	if ((value = envvar(ev, "QUEUE_DUMP", 1)) >= 0)
		thread_queue_dump = value;
#endif
	if ((value = envvar(ev, "STACK_CACHE", 10000)) >= 0)
		thread_stack_cache = value;
	if ((value = envvar(ev, "COND_WAIT_DEFER", 1)) >= 0)
		thread_cond_wait_defer = value;
	if ((value = envvar(ev, "ERROR_DETECTION", 2)) >= 0)
		thread_error_detection = value;
	if ((value = envvar(ev, "ASYNC_SAFE", 1)) >= 0)
		thread_async_safe = value;
	if ((value = envvar(ev, "DOOR_NORESERVE", 1)) >= 0)
		thread_door_noreserve = value;
	if ((value = envvar(ev, "LOCKS_MISALIGNED", 1)) >= 0)
		thread_locks_misaligned = value;
}

/*
 * Look for and evaluate environment variables of the form "_THREAD_*".
 * For compatibility with the past, we also look for environment
 * names of the form "LIBTHREAD_*".
 */
static void
set_thread_vars()
{
	extern const char **_environ;
	const char **pev;
	const char *ev;
	char c;

	if ((pev = _environ) == NULL)
		return;
	while ((ev = *pev++) != NULL) {
		c = *ev;
		if (c == '_' && strncmp(ev, "_THREAD_", 8) == 0)
			etest(ev + 8);
		if (c == 'L' && strncmp(ev, "LIBTHREAD_", 10) == 0)
			etest(ev + 10);
	}
}
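
/*
 * Illustrative usage (not part of libc): these tunables are set in
 * the environment before the program starts, e.g.
 *
 *	$ _THREAD_STACK_CACHE=50 _THREAD_ERROR_DETECTION=2 ./a.out
 *
 * or, using the compatibility prefix,
 *
 *	$ LIBTHREAD_QUEUE_FIFO=8 ./a.out
 */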

/* PROBE_SUPPORT begin */
#pragma weak __tnf_probe_notify
extern void __tnf_probe_notify(void);
/* PROBE_SUPPORT end */

/* same as atexit() but private to the library */
extern int _atexit(void (*)(void));

/* same as _cleanup() but private to the library */
extern void __cleanup(void);

extern void atfork_init(void);

#ifdef __amd64
extern void __proc64id(void);
#endif

static void
init_auxv_data(uberdata_t *udp)
{
	Dl_argsinfo_t args;

	udp->ub_comm_page = NULL;
	if (dlinfo(RTLD_SELF, RTLD_DI_ARGSINFO, &args) < 0)
		return;

	while (args.dla_auxv->a_type != AT_NULL) {
		if (args.dla_auxv->a_type == AT_SUN_COMMPAGE) {
			udp->ub_comm_page = args.dla_auxv->a_un.a_ptr;
		}
		args.dla_auxv++;
	}
}

/*
 * libc_init() is called by ld.so.1 for library initialization.
 * We perform minimal initialization; enough to work with the main thread.
 */
void
libc_init(void)
{
	uberdata_t *udp = &__uberdata;
	ulwp_t *oldself = __curthread();
	ucontext_t uc;
	ulwp_t *self;
	struct rlimit rl;
	caddr_t data;
	size_t tls_size;
	int setmask;

	/*
	 * For the initial stage of initialization, we must be careful
	 * not to call any function that could possibly call _cerror().
	 * For this purpose, we call only the raw system call wrappers.
	 */

#ifdef __amd64
	/*
	 * Gather information about cache layouts for optimized
	 * AMD and Intel assembler strfoo() and memfoo() functions.
	 */
	__proc64id();
#endif

	/*
	 * Every libc, regardless of which link map, must register __cleanup().
	 */
	(void) _atexit(__cleanup);

	/*
	 * Every libc, regardless of link map, needs to go through and check
	 * its aux vectors. Doing so will indicate whether or not this has
	 * been given a comm page (to optimize certain system actions).
	 */
	init_auxv_data(udp);

	/*
	 * We keep our uberdata on one of (a) the first alternate link map
	 * or (b) the primary link map. We switch to the primary link map
	 * and stay there once we see it. All intermediate link maps are
	 * subject to being unloaded at any time.
	 */
	if (oldself != NULL && (oldself->ul_primarymap || !primary_link_map)) {
		__tdb_bootstrap = oldself->ul_uberdata->tdb_bootstrap;
		mutex_setup();
		atfork_init();	/* every link map needs atfork() processing */
		init_progname();
		return;
	}

	/*
	 * To establish the main stack information, we have to get our context.
	 * This is also convenient to use for getting our signal mask.
	 */
	uc.uc_flags = UC_ALL;
	(void) __getcontext(&uc);
	ASSERT(uc.uc_link == NULL);

	tls_size = roundup64(udp->tls_metadata.static_tls.tls_size);
	ASSERT(primary_link_map || tls_size == 0);
	data = lmalloc(sizeof (ulwp_t) + tls_size);
	if (data == NULL)
		thr_panic("cannot allocate thread structure for main thread");
	/* LINTED pointer cast may result in improper alignment */
	self = (ulwp_t *)(data + tls_size);
	init_hash_table[0].hash_bucket = self;

	self->ul_sigmask = uc.uc_sigmask;
	delete_reserved_signals(&self->ul_sigmask);
	/*
	 * Are the old and new sets different?
	 * (This can happen if we are currently blocking SIGCANCEL.)
	 * If so, we must explicitly set our signal mask, below.
	 */
	setmask =
	    ((self->ul_sigmask.__sigbits[0] ^ uc.uc_sigmask.__sigbits[0]) |
	    (self->ul_sigmask.__sigbits[1] ^ uc.uc_sigmask.__sigbits[1]) |
	    (self->ul_sigmask.__sigbits[2] ^ uc.uc_sigmask.__sigbits[2]) |
	    (self->ul_sigmask.__sigbits[3] ^ uc.uc_sigmask.__sigbits[3]));

#ifdef __sparc
	/*
	 * We cache several instructions in the thread structure for use
	 * by the fasttrap DTrace provider. When changing this, read the
	 * comment in fasttrap.h for all the other places that must
	 * be changed.
	 */
	self->ul_dsave = 0x9de04000;	/* save %g1, %g0, %sp */
	self->ul_drestore = 0x81e80000;	/* restore %g0, %g0, %g0 */
	self->ul_dftret = 0x91d0203a;	/* ta 0x3a */
	self->ul_dreturn = 0x81ca0000;	/* return %o0 */
#endif

	self->ul_stktop = (uintptr_t)uc.uc_stack.ss_sp + uc.uc_stack.ss_size;
	(void) getrlimit(RLIMIT_STACK, &rl);
	self->ul_stksiz = rl.rlim_cur;
	self->ul_stk = (caddr_t)(self->ul_stktop - self->ul_stksiz);

	self->ul_forw = self->ul_back = self;
	self->ul_hash = NULL;
	self->ul_ix = 0;
	self->ul_lwpid = 1;	/* _lwp_self() */
	self->ul_main = 1;
	self->ul_self = self;
	self->ul_policy = -1;	/* initialize only when needed */
	self->ul_pri = 0;
	self->ul_cid = 0;
	self->ul_rtclassid = -1;
	self->ul_uberdata = udp;
	if (oldself != NULL) {
		int i;

		ASSERT(primary_link_map);
		ASSERT(oldself->ul_main == 1);
		self->ul_stsd = oldself->ul_stsd;
		for (i = 0; i < TSD_NFAST; i++)
			self->ul_ftsd[i] = oldself->ul_ftsd[i];
		self->ul_tls = oldself->ul_tls;
		/*
		 * Retrieve all pointers to uberdata allocated
		 * while running on previous link maps.
		 * We would like to do a structure assignment here, but
		 * gcc turns structure assignments into calls to memcpy(),
		 * a function exported from libc. We can't call any such
		 * external functions until we establish curthread, below,
		 * so we just call our private version of memcpy().
		 */
		(void) memcpy(udp, oldself->ul_uberdata, sizeof (*udp));
		/*
		 * These items point to global data on the primary link map.
		 */
		udp->thr_hash_table = init_hash_table;
		udp->sigacthandler = sigacthandler;
		udp->tdb.tdb_events = tdb_events;
		ASSERT(udp->nthreads == 1 && !udp->uberflags.uf_mt);
		ASSERT(udp->lwp_stacks == NULL);
		ASSERT(udp->ulwp_freelist == NULL);
		ASSERT(udp->ulwp_replace_free == NULL);
		ASSERT(udp->hash_size == 1);
	}
	udp->all_lwps = self;
	udp->ulwp_one = self;
	udp->pid = getpid();
	udp->nthreads = 1;
	/*
	 * In every link map, tdb_bootstrap points to the same piece of
	 * allocated memory. When the primary link map is initialized,
	 * the allocated memory is assigned a pointer to the one true
	 * uberdata. This allows libc_db to initialize itself regardless
	 * of which instance of libc it finds in the address space.
	 */
	if (udp->tdb_bootstrap == NULL)
		udp->tdb_bootstrap = lmalloc(sizeof (uberdata_t *));
	__tdb_bootstrap = udp->tdb_bootstrap;
	if (primary_link_map) {
		self->ul_primarymap = 1;
		udp->primary_map = 1;
		*udp->tdb_bootstrap = udp;
	}
	/*
	 * Cancellation can't happen until:
	 *	pthread_cancel() is called
	 * or:
	 *	another thread is created
	 * For now, as a single-threaded process, set the flag that tells
	 * PROLOGUE/EPILOGUE (in scalls.c) that cancellation can't happen.
	 */
	self->ul_nocancel = 1;

#if defined(__amd64)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_FSBASE, self);
#elif defined(__i386)
	(void) ___lwp_private(_LWP_SETPRIVATE, _LWP_GSBASE, self);
#endif	/* __i386 || __amd64 */
	set_curthread(self);	/* redundant on i386 */
	/*
	 * Now curthread is established and it is safe to call any
	 * function in libc except one that uses thread-local storage.
	 */
	self->ul_errnop = &errno;
	if (oldself != NULL) {
		/* tls_size was zero when oldself was allocated */
		lfree(oldself, sizeof (ulwp_t));
	}
	mutex_setup();
	atfork_init();
	signal_init();

	/*
	 * If the stack is unlimited, we set the size to zero to disable
	 * stack checking.
	 * XXX: Work harder here. Get the stack size from /proc/self/rmap
	 */
	if (self->ul_stksiz == RLIM_INFINITY) {
		self->ul_ustack.ss_sp = (void *)self->ul_stktop;
		self->ul_ustack.ss_size = 0;
	} else {
		self->ul_ustack.ss_sp = self->ul_stk;
		self->ul_ustack.ss_size = self->ul_stksiz;
	}
	self->ul_ustack.ss_flags = 0;
	(void) setustack(&self->ul_ustack);

	/*
	 * Get the variables that affect thread behavior from the environment.
	 */
	set_thread_vars();
	udp->uberflags.uf_thread_error_detection = (char)thread_error_detection;
	udp->thread_stack_cache = thread_stack_cache;

	/*
	 * Make per-thread copies of global variables, for speed.
	 */
	self->ul_queue_fifo = (char)thread_queue_fifo;
	self->ul_cond_wait_defer = (char)thread_cond_wait_defer;
	self->ul_error_detection = (char)thread_error_detection;
	self->ul_async_safe = (char)thread_async_safe;
	self->ul_door_noreserve = (char)thread_door_noreserve;
	self->ul_misaligned = (char)thread_locks_misaligned;
	self->ul_max_spinners = (uint8_t)thread_max_spinners;
	self->ul_adaptive_spin = thread_adaptive_spin;
	self->ul_queue_spin = thread_queue_spin;

#if defined(__sparc) && !defined(_LP64)
	if (self->ul_misaligned) {
		/*
		 * Tell the kernel to fix up ldx/stx instructions that
		 * refer to non-8-byte aligned data instead of giving
		 * the process an alignment trap and generating SIGBUS.
		 *
		 * Programs compiled for 32-bit sparc with the Studio SS12
		 * compiler get this done for them automatically (in _init()).
		 * We do it here for the benefit of programs compiled with
		 * other compilers, like gcc.
		 *
		 * This is necessary for the _THREAD_LOCKS_MISALIGNED=1
		 * environment variable horrible hack to work.
		 */
		extern void _do_fix_align(void);
		_do_fix_align();
	}
#endif

	/*
	 * When we have initialized the primary link map, inform
	 * the dynamic linker about our interface functions.
	 * Set up our pointer to the program name.
	 */
	if (self->ul_primarymap)
		_ld_libc((void *)rtld_funcs);
	init_progname();

	/*
	 * Defer signals until TLS constructors have been called.
	 */
	sigoff(self);
	tls_setup();
	sigon(self);
	if (setmask)
		(void) restore_signals(self);

	/*
	 * Make private copies of __xpg4 and __xpg6 so libc can test
	 * them after this point without invoking the dynamic linker.
	 */
	libc__xpg4 = __xpg4;
	libc__xpg6 = __xpg6;

	/* PROBE_SUPPORT begin */
	if (self->ul_primarymap && __tnf_probe_notify != NULL)
		__tnf_probe_notify();
	/* PROBE_SUPPORT end */

	init_sigev_thread();
	init_aio();

	/*
	 * We need to reset __threaded dynamically at runtime, so that
	 * a reference to __threaded bound outside libc, which may not
	 * have an initial value of 1 (without a copy relocation in
	 * a.out), still sees the correct value.
	 */
	__threaded = 1;
}

#pragma fini(libc_fini)
void
libc_fini()
{
	/*
	 * If we are doing fini processing for the instance of libc
	 * on the first alternate link map (this happens only when
	 * the dynamic linker rejects a bad audit library), then clear
	 * __curthread(). We abandon whatever memory was allocated by
	 * lmalloc() while running on this alternate link-map but we
	 * don't care (and can't find the memory in any case); we just
	 * want to protect the application from this bad audit library.
	 * No fini processing is done by libc in the normal case.
	 */

	uberdata_t *udp = curthread->ul_uberdata;

	if (udp->primary_map == 0 && udp == &__uberdata)
		set_curthread(NULL);
}

/*
 * finish_init is called when we are about to become multi-threaded,
 * that is, on the first call to thr_create().
 */
void
finish_init()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	thr_hash_table_t *htp;
	void *data;
	int i;

	/*
	 * No locks needed here; we are single-threaded on the first call.
	 * We can be called only after the primary link map has been set up.
	 */
	ASSERT(self->ul_primarymap);
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	ASSERT(udp->hash_size == 1);

	/*
	 * Initialize self->ul_policy, self->ul_cid, and self->ul_pri.
	 */
	update_sched(self);

	/*
	 * Allocate the queue_head array if not already allocated.
	 */
	if (udp->queue_head == NULL)
		queue_alloc();

	/*
	 * Now allocate the thread hash table.
	 */
	if ((data = mmap(NULL, HASHTBLSZ * sizeof (thr_hash_table_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread hash table");

	udp->thr_hash_table = htp = (thr_hash_table_t *)data;
	udp->hash_size = HASHTBLSZ;
	udp->hash_mask = HASHTBLSZ - 1;

	for (i = 0; i < HASHTBLSZ; i++, htp++) {
		htp->hash_lock.mutex_flag = LOCK_INITED;
		htp->hash_lock.mutex_magic = MUTEX_MAGIC;
		htp->hash_cond.cond_magic = COND_MAGIC;
	}
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Set up the SIGCANCEL handler for threads cancellation.
	 */
	setup_cancelsig(SIGCANCEL);

	/*
	 * Arrange to do special things on exit --
	 * - collect queue statistics from all remaining active threads.
	 * - dump queue statistics to stderr if _THREAD_QUEUE_DUMP is set.
	 * - grab assert_lock to ensure that assertion failures
	 *   and a core dump take precedence over _exit().
	 * (Functions are called in the reverse order of their registration.)
	 */
	(void) _atexit(grab_assert_lock);
#if defined(THREAD_DEBUG)
	(void) _atexit(dump_queue_statistics);
	(void) _atexit(collect_queue_statistics);
#endif
}

/*
 * Used only by postfork1_child(), below.
 */
static void
mark_dead_and_buried(ulwp_t *ulwp)
{
	ulwp->ul_dead = 1;
	ulwp->ul_lwpid = (lwpid_t)(-1);
	ulwp->ul_hash = NULL;
	ulwp->ul_ix = -1;
	ulwp->ul_schedctl = NULL;
	ulwp->ul_schedctl_called = NULL;
}

/*
 * This is called from fork1() in the child.
 * Reset our data structures to reflect one lwp.
 */
void
postfork1_child()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	ulwp_t *next;
	ulwp_t *ulwp;
	int i;

	/* daemon threads shouldn't call fork1(), but oh well... */
	self->ul_usropts &= ~THR_DAEMON;
	udp->nthreads = 1;
	udp->ndaemons = 0;
	udp->uberflags.uf_mt = 0;
	__libc_threaded = 0;
	for (i = 0; i < udp->hash_size; i++)
		udp->thr_hash_table[i].hash_bucket = NULL;
	self->ul_lwpid = _lwp_self();
	hash_in_unlocked(self, TIDHASH(self->ul_lwpid, udp), udp);

	/*
	 * Some thread in the parent might have been suspended
	 * while holding udp->callout_lock or udp->ld_lock.
	 * Reinitialize the child's copies.
	 */
	(void) mutex_init(&udp->callout_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	(void) mutex_init(&udp->ld_lock,
	    USYNC_THREAD | LOCK_RECURSIVE, NULL);

	/* no one in the child is on a sleep queue; reinitialize */
	if ((qp = udp->queue_head) != NULL) {
		(void) memset(qp, 0, 2 * QHASHSIZE * sizeof (queue_head_t));
		for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
			qp->qh_type = (i < QHASHSIZE)? MX : CV;
			qp->qh_lock.mutex_flag = LOCK_INITED;
			qp->qh_lock.mutex_magic = MUTEX_MAGIC;
			qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
			qp->qh_hlen = 1;
			qp->qh_hmax = 1;
#endif
		}
	}

	/*
	 * Do post-fork1 processing for subsystems that need it.
	 * We need to do this before unmapping all of the abandoned
	 * threads' stacks, below, because the post-fork1 actions
	 * might require access to those stacks.
	 */
	postfork1_child_sigev_aio();
	postfork1_child_sigev_mq();
	postfork1_child_sigev_timer();
	postfork1_child_aio();
	/*
	 * The above subsystems use thread pools, so this action
	 * must be performed after those actions.
	 */
	postfork1_child_tpool();

	/*
	 * All lwps except ourself are gone. Mark them so.
	 * First mark all of the lwps that have already been freed.
	 * Then mark and free all of the active lwps except ourself.
	 * Since we are single-threaded, no locks are required here.
	 */
	for (ulwp = udp->lwp_stacks; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = udp->ulwp_freelist; ulwp != NULL; ulwp = ulwp->ul_next)
		mark_dead_and_buried(ulwp);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = next) {
		next = ulwp->ul_forw;
		ulwp->ul_forw = ulwp->ul_back = NULL;
		mark_dead_and_buried(ulwp);
		tsd_free(ulwp);
		tls_free(ulwp);
		rwl_free(ulwp);
		heldlock_free(ulwp);
		ulwp_free(ulwp);
	}
	self->ul_forw = self->ul_back = udp->all_lwps = self;
	if (self != udp->ulwp_one)
		mark_dead_and_buried(udp->ulwp_one);
	if ((ulwp = udp->all_zombies) != NULL) {
		ASSERT(udp->nzombies != 0);
		do {
			next = ulwp->ul_forw;
			ulwp->ul_forw = ulwp->ul_back = NULL;
			mark_dead_and_buried(ulwp);
			udp->nzombies--;
			if (ulwp->ul_replace) {
				ulwp->ul_next = NULL;
				if (udp->ulwp_replace_free == NULL) {
					udp->ulwp_replace_free =
					    udp->ulwp_replace_last = ulwp;
				} else {
					udp->ulwp_replace_last->ul_next = ulwp;
					udp->ulwp_replace_last = ulwp;
				}
			}
		} while ((ulwp = next) != udp->all_zombies);
		ASSERT(udp->nzombies == 0);
		udp->all_zombies = NULL;
		udp->nzombies = 0;
	}
	trim_stack_cache(0);
}

lwpid_t
lwp_self(void)
{
	return (curthread->ul_lwpid);
}

#pragma weak _ti_thr_self = thr_self
#pragma weak pthread_self = thr_self
thread_t
thr_self()
{
	return (curthread->ul_lwpid);
}

int
thr_main()
{
	ulwp_t *self = __curthread();

	return ((self == NULL)? -1 : self->ul_main);
}
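
/*
 * Note: thr_main() returns 1 if the calling thread is the main
 * thread, 0 if it is not, and -1 if libc's threading support has
 * not yet been initialized (no curthread established).
 */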
1778
1779 int
_thrp_cancelled(void)1780 _thrp_cancelled(void)
1781 {
1782 return (curthread->ul_rval == PTHREAD_CANCELED);
1783 }
1784
1785 int
_thrp_stksegment(ulwp_t * ulwp,stack_t * stk)1786 _thrp_stksegment(ulwp_t *ulwp, stack_t *stk)
1787 {
1788 stk->ss_sp = (void *)ulwp->ul_stktop;
1789 stk->ss_size = ulwp->ul_stksiz;
1790 stk->ss_flags = 0;
1791 return (0);
1792 }
1793
1794 #pragma weak _thr_stksegment = thr_stksegment
1795 int
thr_stksegment(stack_t * stk)1796 thr_stksegment(stack_t *stk)
1797 {
1798 return (_thrp_stksegment(curthread, stk));
1799 }
1800
1801 void
force_continue(ulwp_t * ulwp)1802 force_continue(ulwp_t *ulwp)
1803 {
1804 #if defined(THREAD_DEBUG)
1805 ulwp_t *self = curthread;
1806 uberdata_t *udp = self->ul_uberdata;
1807 #endif
1808 int error;
1809 timespec_t ts;
1810
1811 ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
1812 ASSERT(MUTEX_OWNED(ulwp_mutex(ulwp, udp), self));
1813
1814 for (;;) {
1815 error = _lwp_continue(ulwp->ul_lwpid);
1816 if (error != 0 && error != EINTR)
1817 break;
1818 error = 0;
1819 if (ulwp->ul_stopping) { /* he is stopping himself */
1820 ts.tv_sec = 0; /* give him a chance to run */
1821 ts.tv_nsec = 100000; /* 100 usecs or clock tick */
1822 (void) __nanosleep(&ts, NULL);
1823 }
1824 if (!ulwp->ul_stopping) /* he is running now */
1825 break; /* so we are done */
1826 /*
1827 * He is marked as being in the process of stopping
1828 * himself. Loop around and continue him again.
1829 * He may not have been stopped the first time.
1830 */
1831 }
1832 }

/*
 * Suspend an lwp with lwp_suspend(), then move it to a safe point,
 * that is, to a point where ul_critical and ul_rtld are both zero.
 * On return, the ulwp_lock() is dropped as with ulwp_unlock().
 * If 'link_dropped' is non-NULL, then 'link_lock' is held on entry.
 * If we have to drop link_lock, we store 1 through link_dropped.
 * If the lwp exits before it can be suspended, we return ESRCH.
 */
int
safe_suspend(ulwp_t *ulwp, uchar_t whystopped, int *link_dropped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	cond_t *cvp = ulwp_condvar(ulwp, udp);
	mutex_t *mp = ulwp_mutex(ulwp, udp);
	thread_t tid = ulwp->ul_lwpid;
	int ix = ulwp->ul_ix;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR ||
	    whystopped == TSTP_FORK);
	ASSERT(ulwp != self);
	ASSERT(!ulwp->ul_stop);
	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
	ASSERT(MUTEX_OWNED(mp, self));

	if (link_dropped != NULL)
		*link_dropped = 0;

	/*
	 * We must grab the target's spin lock before suspending it.
	 * See the comments below and in _thrp_suspend() for why.
	 */
	spin_lock_set(&ulwp->ul_spinlock);
	(void) ___lwp_suspend(tid);
	spin_lock_clear(&ulwp->ul_spinlock);

top:
	if ((ulwp->ul_critical == 0 && ulwp->ul_rtld == 0) ||
	    ulwp->ul_stopping) {
		/* thread is already safe */
		ulwp->ul_stop |= whystopped;
	} else {
		/*
		 * Setting ul_pleasestop causes the target thread to stop
		 * itself in _thrp_suspend(), below, after we drop its lock.
		 * We must continue the critical thread before dropping
		 * link_lock because the critical thread may be holding
		 * the queue lock for link_lock.  This is delicate.
		 */
		ulwp->ul_pleasestop |= whystopped;
		force_continue(ulwp);
		if (link_dropped != NULL) {
			*link_dropped = 1;
			lmutex_unlock(&udp->link_lock);
			/* be sure to drop link_lock only once */
			link_dropped = NULL;
		}

		/*
		 * The thread may disappear by calling thr_exit() so we
		 * cannot rely on the ulwp pointer after dropping the lock.
		 * Instead, we search the hash table to find it again.
		 * When we return, we may find that the thread has been
		 * continued by some other thread.  The suspend/continue
		 * interfaces are prone to such race conditions by design.
		 */
		while (ulwp && !ulwp->ul_dead && !ulwp->ul_stop &&
		    (ulwp->ul_pleasestop & whystopped)) {
			(void) __cond_wait(cvp, mp);
			for (ulwp = udp->thr_hash_table[ix].hash_bucket;
			    ulwp != NULL; ulwp = ulwp->ul_hash) {
				if (ulwp->ul_lwpid == tid)
					break;
			}
		}

		if (ulwp == NULL || ulwp->ul_dead)
			error = ESRCH;
		else {
			/*
			 * Do another lwp_suspend() to make sure we don't
			 * return until the target thread is fully stopped
			 * in the kernel.  Don't apply lwp_suspend() until
			 * we know that the target is not holding any
			 * queue locks, that is, that it has completed
			 * ulwp_unlock(self) and has, or at least is
			 * about to, call lwp_suspend() on itself.  We do
			 * this by grabbing the target's spin lock.
			 */
			ASSERT(ulwp->ul_lwpid == tid);
			spin_lock_set(&ulwp->ul_spinlock);
			(void) ___lwp_suspend(tid);
			spin_lock_clear(&ulwp->ul_spinlock);
			/*
			 * If some other thread did a thr_continue()
			 * on the target thread we have to start over.
			 */
			if (!ulwp->ul_stopping || !(ulwp->ul_stop & whystopped))
				goto top;
		}
	}

	(void) cond_broadcast(cvp);
	lmutex_unlock(mp);
	return (error);
}
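
/*
 * Illustrative sketch (not compiled, and not the libc mechanism): the same
 * please-stop/condition-variable handshake that safe_suspend() uses,
 * expressed with POSIX threads.  The controller raises a flag and the
 * target parks itself at its next safe point; all names here are made up.
 * This simplified version is not hardened against back-to-back
 * suspend/resume races the way the code above is.
 */
#if 0
#include <pthread.h>

typedef struct {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		pleasestop;	/* controller asks target to stop */
	int		stopped;	/* target acknowledges the stop */
} safepoint_t;

/* Called by the target thread at each of its safe points. */
static void
safepoint_check(safepoint_t *sp)
{
	(void) pthread_mutex_lock(&sp->lock);
	while (sp->pleasestop) {
		sp->stopped = 1;
		(void) pthread_cond_broadcast(&sp->cv);
		(void) pthread_cond_wait(&sp->cv, &sp->lock);
	}
	sp->stopped = 0;
	(void) pthread_mutex_unlock(&sp->lock);
}

/* Called by the controller; returns once the target has parked. */
static void
safepoint_suspend(safepoint_t *sp)
{
	(void) pthread_mutex_lock(&sp->lock);
	sp->pleasestop = 1;
	while (!sp->stopped)
		(void) pthread_cond_wait(&sp->cv, &sp->lock);
	(void) pthread_mutex_unlock(&sp->lock);
}

static void
safepoint_resume(safepoint_t *sp)
{
	(void) pthread_mutex_lock(&sp->lock);
	sp->pleasestop = 0;
	(void) pthread_cond_broadcast(&sp->cv);
	(void) pthread_mutex_unlock(&sp->lock);
}
#endif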

int
_thrp_suspend(thread_t tid, uchar_t whystopped)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	ASSERT((whystopped & (TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) != 0);
	ASSERT((whystopped & ~(TSTP_REGULAR|TSTP_MUTATOR|TSTP_FORK)) == 0);

	/*
	 * We can't suspend anyone except ourself while
	 * some other thread is performing a fork.
	 * This also allows only one suspension at a time.
	 */
	if (tid != self->ul_lwpid)
		fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL)
		error = ESRCH;
	else if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		ulwp_unlock(ulwp, udp);
		error = EINVAL;
	} else if (ulwp->ul_stop) {	/* already stopped */
		ulwp->ul_stop |= whystopped;
		ulwp_broadcast(ulwp);
		ulwp_unlock(ulwp, udp);
	} else if (ulwp != self) {
		/*
		 * After suspending the other thread, move it out of a
		 * critical section and deal with the schedctl mappings.
		 * safe_suspend() suspends the other thread, calls
		 * ulwp_broadcast(ulwp) and drops the ulwp lock.
		 */
		error = safe_suspend(ulwp, whystopped, NULL);
	} else {
		int schedctl_after_fork = 0;

		/*
		 * We are suspending ourself.  We must not take a signal
		 * until we return from lwp_suspend() and clear ul_stopping.
		 * This is to guard against siglongjmp().
		 */
		enter_critical(self);
		self->ul_sp = stkptr();
		_flush_windows();	/* sparc */
		self->ul_pleasestop = 0;
		self->ul_stop |= whystopped;
		/*
		 * Grab our spin lock before dropping ulwp_mutex(self).
		 * This prevents the suspending thread from applying
		 * lwp_suspend() to us before we emerge from
		 * lmutex_unlock(mp) and have dropped mp's queue lock.
		 */
		spin_lock_set(&self->ul_spinlock);
		self->ul_stopping = 1;
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		/*
		 * From this point until we return from lwp_suspend(),
		 * we must not call any function that might invoke the
		 * dynamic linker, that is, we can only call functions
		 * private to the library.
		 *
		 * Also, this is a nasty race condition for a process
		 * that is undergoing a forkall() operation:
		 * Once we clear our spinlock (below), we are vulnerable
		 * to being suspended by the forkall() thread before
		 * we manage to suspend ourself in ___lwp_suspend().
		 * See safe_suspend() and force_continue().
		 *
		 * To avoid a SIGSEGV due to the disappearance
		 * of the schedctl mappings in the child process,
		 * which can happen in spin_lock_clear() if we
		 * are suspended while we are in the middle of
		 * its call to preempt(), we preemptively clear
		 * our own schedctl pointer before dropping our
		 * spinlock.  We reinstate it, in both the parent
		 * and (if this really is a forkall()) the child.
		 */
		if (whystopped & TSTP_FORK) {
			schedctl_after_fork = 1;
			self->ul_schedctl = NULL;
			self->ul_schedctl_called = &udp->uberflags;
		}
		spin_lock_clear(&self->ul_spinlock);
		(void) ___lwp_suspend(tid);
		/*
		 * Somebody else continued us.
		 * We can't grab ulwp_lock(self)
		 * until after clearing ul_stopping.
		 * force_continue() relies on this.
		 */
		self->ul_stopping = 0;
		self->ul_sp = 0;
		if (schedctl_after_fork) {
			self->ul_schedctl_called = NULL;
			self->ul_schedctl = NULL;
			(void) setup_schedctl();
		}
		ulwp_lock(self, udp);
		ulwp_broadcast(self);
		ulwp_unlock(self, udp);
		exit_critical(self);
	}

	if (tid != self->ul_lwpid)
		fork_lock_exit();

	return (error);
}

/*
 * Suspend all lwps other than ourself in preparation for fork.
 */
void
suspend_fork()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));
top:
	lmutex_lock(&udp->link_lock);

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_FORK;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_FORK, &link_dropped) ||
			    link_dropped)
				goto top;
		}
	}

	lmutex_unlock(&udp->link_lock);
}

void
continue_fork(int child)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	ASSERT(MUTEX_OWNED(&udp->fork_lock, self));

	/*
	 * Clear the schedctl pointers in the child of forkall().
	 */
	if (child) {
		for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
			ulwp->ul_schedctl_called =
			    ulwp->ul_dead? &udp->uberflags : NULL;
			ulwp->ul_schedctl = NULL;
		}
	}

	/*
	 * Set all lwps that were stopped for fork() running again.
	 */
	lmutex_lock(&udp->link_lock);
	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		ASSERT(ulwp->ul_stop & TSTP_FORK);
		ulwp->ul_stop &= ~TSTP_FORK;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop)
			force_continue(ulwp);
		lmutex_unlock(mp);
	}
	lmutex_unlock(&udp->link_lock);
}
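
/*
 * Illustrative sketch (not compiled; see fork.c for the real code, which
 * also replays atfork handlers and blocks signals): how a forkall()-style
 * operation brackets the underlying system call with suspend_fork() and
 * continue_fork() above.  __forkallx() stands in for the raw system call.
 */
#if 0
static pid_t
forkall_sketch(void)
{
	pid_t pid;

	fork_lock_enter();	/* serialize with suspend/continue */
	suspend_fork();		/* stop every other lwp at a safe point */
	pid = __forkallx(0);	/* the underlying system call */
	if (pid == 0)
		continue_fork(1);	/* child: reset schedctl, restart */
	else
		continue_fork(0);	/* parent: restart stopped lwps */
	fork_lock_exit();
	return (pid);
}
#endif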

int
_thrp_continue(thread_t tid, uchar_t whystopped)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	mutex_t *mp;
	int error = 0;

	ASSERT(whystopped == TSTP_REGULAR ||
	    whystopped == TSTP_MUTATOR);

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	if ((ulwp = find_lwp(tid)) == NULL) {
		fork_lock_exit();
		return (ESRCH);
	}

	mp = ulwp_mutex(ulwp, udp);
	if (whystopped == TSTP_MUTATOR && !ulwp->ul_mutator) {
		error = EINVAL;
	} else if (ulwp->ul_stop & whystopped) {
		ulwp->ul_stop &= ~whystopped;
		ulwp_broadcast(ulwp);
		if (!ulwp->ul_stop) {
			if (whystopped == TSTP_REGULAR && ulwp->ul_created) {
				ulwp->ul_sp = 0;
				ulwp->ul_created = 0;
			}
			force_continue(ulwp);
		}
	}
	lmutex_unlock(mp);

	fork_lock_exit();
	return (error);
}

int
thr_suspend(thread_t tid)
{
	return (_thrp_suspend(tid, TSTP_REGULAR));
}

int
thr_continue(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_REGULAR));
}

void
thr_yield()
{
	yield();
}

#pragma weak pthread_kill = thr_kill
#pragma weak _thr_kill = thr_kill
int
thr_kill(thread_t tid, int sig)
{
	if (sig == SIGCANCEL)
		return (EINVAL);
	return (_lwp_kill(tid, sig));
}
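
/*
 * Illustrative sketch (not compiled): the suspend/continue/kill interfaces
 * above used together.  Assumes a SIGUSR1 handler has been installed
 * elsewhere; error handling is elided.
 */
#if 0
#include <thread.h>
#include <signal.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	for (;;)
		(void) pause();		/* wait for signals */
	/* NOTREACHED */
	return (NULL);
}

static void
demo(void)
{
	thread_t tid;

	(void) thr_create(NULL, 0, worker, NULL, THR_SUSPENDED, &tid);
	(void) thr_continue(tid);	/* start the suspended worker */
	(void) thr_kill(tid, SIGUSR1);	/* SIGCANCEL would return EINVAL */
	(void) thr_suspend(tid);	/* stop it again at a safe point */
}
#endif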

/*
 * Exit a critical section, take deferred actions if necessary.
 * Called from exit_critical() and from sigon().
 */
void
do_exit_critical()
{
	ulwp_t *self = curthread;
	int sig;

	ASSERT(self->ul_critical == 0);

	/*
	 * Don't suspend ourself or take a deferred signal while dying
	 * or while executing inside the dynamic linker (ld.so.1).
	 */
	if (self->ul_dead || self->ul_rtld)
		return;

	while (self->ul_pleasestop ||
	    (self->ul_cursig != 0 && self->ul_sigdefer == 0)) {
		/*
		 * Avoid a recursive call to exit_critical() in _thrp_suspend()
		 * by keeping self->ul_critical == 1 here.
		 */
		self->ul_critical++;
		while (self->ul_pleasestop) {
			/*
			 * Guard against suspending ourself while on a sleep
			 * queue.  See the comments in call_user_handler().
			 */
			unsleep_self();
			set_parking_flag(self, 0);
			(void) _thrp_suspend(self->ul_lwpid,
			    self->ul_pleasestop);
		}
		self->ul_critical--;

		if ((sig = self->ul_cursig) != 0 && self->ul_sigdefer == 0) {
			/*
			 * Clear ul_cursig before proceeding.
			 * This protects us from the dynamic linker's
			 * calls to bind_guard()/bind_clear() in the
			 * event that it is invoked to resolve a symbol
			 * like take_deferred_signal() below.
			 */
			self->ul_cursig = 0;
			take_deferred_signal(sig);
			ASSERT(self->ul_cursig == 0);
		}
	}
	ASSERT(self->ul_critical == 0);
}

/*
 * _ti_bind_guard() and _ti_bind_clear() are called by the dynamic linker
 * (ld.so.1) when it has to do something, like resolve a symbol to be called
 * by the application or one of its libraries.  _ti_bind_guard() is called
 * on entry to ld.so.1, _ti_bind_clear() on exit from ld.so.1 back to the
 * application.  The dynamic linker gets special dispensation from libc to
 * run in a critical region (all signals deferred and no thread suspension
 * or forking allowed), and to be immune from cancellation for the duration.
 */
int
_ti_bind_guard(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == bindflag)
		return (0);
	self->ul_bindflags |= bindflag;
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		sigoff(self);	/* see no signals while holding ld_lock */
		self->ul_rtld++;	/* don't suspend while in ld.so.1 */
		(void) mutex_lock(&udp->ld_lock);
	}
	enter_critical(self);
	self->ul_save_state = self->ul_cancel_disabled;
	self->ul_cancel_disabled = 1;
	set_cancel_pending_flag(self, 0);
	return (1);
}

int
_ti_bind_clear(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int bindflag = (flags & THR_FLG_RTLD);

	if ((self->ul_bindflags & bindflag) == 0)
		return (self->ul_bindflags);
	self->ul_bindflags &= ~bindflag;
	self->ul_cancel_disabled = self->ul_save_state;
	set_cancel_pending_flag(self, 0);
	exit_critical(self);
	if ((flags & (THR_FLG_NOLOCK | THR_FLG_REENTER)) == THR_FLG_NOLOCK) {
		if (MUTEX_OWNED(&udp->ld_lock, self)) {
			(void) mutex_unlock(&udp->ld_lock);
			self->ul_rtld--;
			sigon(self);	/* reenable signals */
		}
	}
	return (self->ul_bindflags);
}
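
/*
 * Illustrative sketch (not compiled): how ld.so.1 pairs the two interfaces
 * above around a binding operation.  The real callers are wrappers inside
 * the dynamic linker; the function name here is made up.
 */
#if 0
static void
binding_sketch(void)
{
	int entered = _ti_bind_guard(THR_FLG_RTLD);

	/* ... perform the relocation or symbol lookup ... */

	if (entered)
		(void) _ti_bind_clear(THR_FLG_RTLD);
}
#endif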

/*
 * Tell the dynamic linker (ld.so.1) whether or not it was entered from
 * a critical region in libc.  Return zero if not, else return non-zero.
 */
int
_ti_critical(void)
{
	ulwp_t *self = curthread;
	int level = self->ul_critical;

	if ((self->ul_bindflags & THR_FLG_RTLD) == 0 || level == 0)
		return (level);	/* ld.so.1 hasn't (yet) called enter() */
	return (level - 1);
}

/*
 * sigoff() and sigon() enable cond_wait() to behave (optionally) like
 * it does in the old libthread (see the comments in cond_wait_queue()).
 * Also, signals are deferred at thread startup until TLS constructors
 * have all been called, at which time _thrp_setup() calls sigon().
 *
 * _sigoff() and _sigon() are external consolidation-private interfaces to
 * sigoff() and sigon(), respectively, in libc.  These are used in libnsl.
 * Also, _sigoff() and _sigon() are called from dbx's run-time checking
 * (librtc.so) to defer signals during its critical sections (not to be
 * confused with libc critical sections [see exit_critical() above]).
 */
void
_sigoff(void)
{
	ulwp_t *self = curthread;

	sigoff(self);
}

void
_sigon(void)
{
	ulwp_t *self = curthread;

	ASSERT(self->ul_sigdefer > 0);
	sigon(self);
}
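
/*
 * Illustrative sketch (not compiled): the consolidation-private bracket
 * described above, as a client such as librtc.so might use it.
 */
#if 0
static void
rtc_critical_sketch(void)
{
	_sigoff();	/* defer signal delivery */
	/* ... run-time-checking critical section ... */
	_sigon();	/* take any deferred signal now */
}
#endif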

int
thr_getconcurrency()
{
	return (thr_concurrency);
}

int
pthread_getconcurrency()
{
	return (pthread_concurrency);
}

int
thr_setconcurrency(int new_level)
{
	uberdata_t *udp = curthread->ul_uberdata;

	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	lmutex_lock(&udp->link_lock);
	if (new_level > thr_concurrency)
		thr_concurrency = new_level;
	lmutex_unlock(&udp->link_lock);
	return (0);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	if (new_level > 65536)		/* 65536 is totally arbitrary */
		return (EAGAIN);
	pthread_concurrency = new_level;
	return (0);
}
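
/*
 * Illustrative sketch (not compiled): the concurrency level is only a
 * hint; as implemented above, pthread_setconcurrency() simply records the
 * value that pthread_getconcurrency() later returns.
 */
#if 0
#include <pthread.h>

static void
concurrency_hint(int nworkers)
{
	if (pthread_setconcurrency(nworkers) == 0)
		(void) pthread_getconcurrency();	/* == nworkers */
}
#endif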

size_t
thr_min_stack(void)
{
	return (MINSTACK);
}
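
/*
 * Illustrative sketch (not compiled): sizing an application-supplied stack
 * from thr_min_stack() plus the application's own needs.  The stack is
 * leaked here for brevity.
 */
#if 0
#include <thread.h>
#include <stdlib.h>
#include <errno.h>

static int
create_small_thread(void *(*func)(void *), void *arg, thread_t *tidp)
{
	size_t size = thr_min_stack() + 32 * 1024;
	void *stk = malloc(size);

	if (stk == NULL)
		return (ENOMEM);
	return (thr_create(stk, size, func, arg, 0, tidp));
}
#endif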

int
__nthreads(void)
{
	return (curthread->ul_uberdata->nthreads);
}

/*
 * XXX
 * The remainder of this file implements the private interfaces to java for
 * garbage collection.  It is no longer used, at least by java 1.2.
 * It can all go away once all old JVMs have disappeared.
 */

int suspendingallmutators;	/* when non-zero, suspending all mutators. */
int suspendedallmutators;	/* when non-zero, all mutators suspended. */
int mutatorsbarrier;		/* when non-zero, mutators barrier imposed. */
mutex_t mutatorslock = DEFAULTMUTEX;	/* used to enforce mutators barrier. */
cond_t mutatorscv = DEFAULTCV;		/* where non-mutators sleep. */

/*
 * Get the available register state for the target thread.
 * Return non-volatile registers: TRS_NONVOLATILE
 */
#pragma weak _thr_getstate = thr_getstate
int
thr_getstate(thread_t tid, int *flag, lwpid_t *lwp, stack_t *ss, gregset_t rs)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int error = 0;
	int trs_flag = TRS_LWPID;

	if (tid == 0 || self->ul_lwpid == tid) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwpp = find_lwpp(tid)) != NULL) {
		ulwp = *ulwpp;
	} else {
		if (flag)
			*flag = TRS_INVALID;
		return (ESRCH);
	}

	if (ulwp->ul_dead) {
		trs_flag = TRS_INVALID;
	} else if (!ulwp->ul_stop && !suspendedallmutators) {
		error = EINVAL;
		trs_flag = TRS_INVALID;
	} else if (ulwp->ul_stop) {
		trs_flag = TRS_NONVOLATILE;
		getgregs(ulwp, rs);
	}

	if (flag)
		*flag = trs_flag;
	if (lwp)
		*lwp = tid;
	if (ss != NULL)
		(void) _thrp_stksegment(ulwp, ss);

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Set the appropriate register state for the target thread.
 * This is not used by java.  It exists solely for the MSTC test suite.
 */
#pragma weak _thr_setstate = thr_setstate
int
thr_setstate(thread_t tid, int flag, gregset_t rs)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int error = 0;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (ESRCH);

	if (!ulwp->ul_stop && !suspendedallmutators)
		error = EINVAL;
	else if (rs != NULL) {
		switch (flag) {
		case TRS_NONVOLATILE:
			/* do /proc stuff here? */
			if (ulwp->ul_stop)
				setgregs(ulwp, rs);
			else
				error = EINVAL;
			break;
		case TRS_LWPID:		/* do /proc stuff here? */
		default:
			error = EINVAL;
			break;
		}
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

int
getlwpstatus(thread_t tid, struct lwpstatus *sp)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	char buf[100];
	int fd;

	/* "/proc/self/lwp/%u/lwpstatus" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpstatus");
	if ((fd = __open(buf, O_RDONLY, 0)) >= 0) {
		while (__pread(fd, sp, sizeof (*sp), 0) == sizeof (*sp)) {
			if (sp->pr_flags & PR_STOPPED) {
				(void) __close(fd);
				return (0);
			}
			yield();	/* give him a chance to stop */
		}
		(void) __close(fd);
	}
	return (-1);
}

int
putlwpregs(thread_t tid, prgregset_t prp)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	char buf[100];
	int fd;
	long dstop_sreg[2];
	long run_null[2];
	iovec_t iov[3];

	/* "/proc/self/lwp/%u/lwpctl" w/o stdio */
	(void) strcpy(buf, "/proc/self/lwp/");
	ultos((uint64_t)tid, 10, buf + strlen(buf));
	(void) strcat(buf, "/lwpctl");
	if ((fd = __open(buf, O_WRONLY, 0)) >= 0) {
		dstop_sreg[0] = PCDSTOP;	/* direct it to stop */
		dstop_sreg[1] = PCSREG;		/* set the registers */
		iov[0].iov_base = (caddr_t)dstop_sreg;
		iov[0].iov_len = sizeof (dstop_sreg);
		iov[1].iov_base = (caddr_t)prp;	/* from the register set */
		iov[1].iov_len = sizeof (prgregset_t);
		run_null[0] = PCRUN;		/* make it runnable again */
		run_null[1] = 0;
		iov[2].iov_base = (caddr_t)run_null;
		iov[2].iov_len = sizeof (run_null);
		if (__writev(fd, iov, 3) >= 0) {
			(void) __close(fd);
			return (0);
		}
		(void) __close(fd);
	}
	return (-1);
}
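
/*
 * Illustrative sketch (not compiled): the same /proc control protocol that
 * putlwpregs() uses, reduced to its skeleton.  Each control message
 * written to an lwpctl file is a long opcode followed by its operands;
 * PCWSTOP (not used here) would wait for the stop to take effect.
 */
#if 0
#include <procfs.h>
#include <fcntl.h>
#include <unistd.h>

static int
stop_and_run(const char *lwpctl_path)
{
	long ctl[2];
	int fd;

	if ((fd = open(lwpctl_path, O_WRONLY)) < 0)
		return (-1);
	ctl[0] = PCDSTOP;	/* direct the lwp to stop */
	(void) write(fd, ctl, sizeof (long));
	ctl[0] = PCRUN;		/* set it running again */
	ctl[1] = 0;		/* no PRSTEP/PRCSIG modifiers */
	(void) write(fd, ctl, 2 * sizeof (long));
	(void) close(fd);
	return (0);
}
#endif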

static ulong_t
gettsp_slow(thread_t tid)
{
	char buf[100];
	struct lwpstatus status;

	if (getlwpstatus(tid, &status) != 0) {
		/* "__gettsp(%u): can't read lwpstatus" w/o stdio */
		(void) strcpy(buf, "__gettsp(");
		ultos((uint64_t)tid, 10, buf + strlen(buf));
		(void) strcat(buf, "): can't read lwpstatus");
		thr_panic(buf);
	}
	return (status.pr_reg[R_SP]);
}

ulong_t
__gettsp(thread_t tid)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	ulong_t result;

	if ((ulwp = find_lwp(tid)) == NULL)
		return (0);

	if (ulwp->ul_stop && (result = ulwp->ul_sp) != 0) {
		ulwp_unlock(ulwp, udp);
		return (result);
	}

	result = gettsp_slow(tid);
	ulwp_unlock(ulwp, udp);
	return (result);
}

/*
 * This tells java stack walkers how to find the ucontext
 * structure passed to signal handlers.
 */
#pragma weak _thr_sighndlrinfo = thr_sighndlrinfo
void
thr_sighndlrinfo(void (**func)(), int *funcsize)
{
	*func = &__sighndlr;
	*funcsize = (char *)&__sighndlrend - (char *)&__sighndlr;
}
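
/*
 * Illustrative sketch (not compiled): how a stack walker applies the
 * information above.  A return address that falls within
 * [__sighndlr, __sighndlrend) identifies a signal-handler frame, from
 * which the saved ucontext can be located.
 */
#if 0
#include <thread.h>
#include <sys/types.h>

static int
pc_in_sighndlr(uintptr_t pc)
{
	void (*func)();
	int funcsize;

	thr_sighndlrinfo(&func, &funcsize);
	return (pc >= (uintptr_t)func && pc < (uintptr_t)func + funcsize);
}
#endif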

/*
 * Mark a thread a mutator or reset a mutator to being a default,
 * non-mutator thread.
 */
#pragma weak _thr_setmutator = thr_setmutator
int
thr_setmutator(thread_t tid, int enabled)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int error;
	int cancel_state;

	enabled = enabled? 1 : 0;
top:
	if (tid == 0) {
		ulwp = self;
		ulwp_lock(ulwp, udp);
	} else if ((ulwp = find_lwp(tid)) == NULL) {
		return (ESRCH);
	}

	/*
	 * The target thread should be the caller itself or a suspended thread.
	 * This prevents the target from also changing its ul_mutator field.
	 */
	error = 0;
	if (ulwp != self && !ulwp->ul_stop && enabled)
		error = EINVAL;
	else if (ulwp->ul_mutator != enabled) {
		lmutex_lock(&mutatorslock);
		if (mutatorsbarrier) {
			ulwp_unlock(ulwp, udp);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
			    &cancel_state);
			while (mutatorsbarrier)
				(void) cond_wait(&mutatorscv, &mutatorslock);
			(void) pthread_setcancelstate(cancel_state, NULL);
			lmutex_unlock(&mutatorslock);
			goto top;
		}
		ulwp->ul_mutator = enabled;
		lmutex_unlock(&mutatorslock);
	}

	ulwp_unlock(ulwp, udp);
	return (error);
}

/*
 * Establish a barrier against new mutators.  Any non-mutator trying
 * to become a mutator is suspended until the barrier is removed.
 */
#pragma weak _thr_mutators_barrier = thr_mutators_barrier
void
thr_mutators_barrier(int enabled)
{
	int oldvalue;
	int cancel_state;

	lmutex_lock(&mutatorslock);

	/*
	 * Wait if trying to set the barrier while it is already set.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	while (mutatorsbarrier && enabled)
		(void) cond_wait(&mutatorscv, &mutatorslock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	oldvalue = mutatorsbarrier;
	mutatorsbarrier = enabled;
	/*
	 * Wake up any blocked non-mutators when the barrier is removed.
	 */
	if (oldvalue && !enabled)
		(void) cond_broadcast(&mutatorscv);
	lmutex_unlock(&mutatorslock);
}

/*
 * Suspend the set of all mutators except for the caller.  The list
 * of actively running threads is searched and only the mutators
 * in this list are suspended.  Actively running non-mutators remain
 * running.  Any other thread is suspended.
 */
#pragma weak _thr_suspend_allmutators = thr_suspend_allmutators
int
thr_suspend_allmutators(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	int link_dropped;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

top:
	lmutex_lock(&udp->link_lock);

	if (suspendingallmutators || suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendingallmutators = 1;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		ulwp_lock(ulwp, udp);
		if (!ulwp->ul_mutator) {
			ulwp_unlock(ulwp, udp);
		} else if (ulwp->ul_stop) {	/* already stopped */
			ulwp->ul_stop |= TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			ulwp_unlock(ulwp, udp);
		} else {
			/*
			 * Move the stopped lwp out of a critical section.
			 */
			if (safe_suspend(ulwp, TSTP_MUTATOR, &link_dropped) ||
			    link_dropped) {
				suspendingallmutators = 0;
				goto top;
			}
		}
	}

	suspendedallmutators = 1;
	suspendingallmutators = 0;
	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Suspend the target mutator.  The caller is permitted to suspend
 * itself.  If a mutator barrier is enabled, the caller will suspend
 * itself as though it had been suspended by thr_suspend_allmutators().
 * When the barrier is removed, this thread will be resumed.  Any
 * suspended mutator, whether suspended by thr_suspend_mutator(), or by
 * thr_suspend_allmutators(), can be resumed by thr_continue_mutator().
 */
#pragma weak _thr_suspend_mutator = thr_suspend_mutator
int
thr_suspend_mutator(thread_t tid)
{
	if (tid == 0)
		tid = curthread->ul_lwpid;
	return (_thrp_suspend(tid, TSTP_MUTATOR));
}

/*
 * Resume the set of all suspended mutators.
 */
#pragma weak _thr_continue_allmutators = thr_continue_allmutators
int
thr_continue_allmutators()
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;

	/*
	 * We single-thread the entire thread suspend/continue mechanism.
	 */
	fork_lock_enter();

	lmutex_lock(&udp->link_lock);
	if (!suspendedallmutators) {
		lmutex_unlock(&udp->link_lock);
		fork_lock_exit();
		return (EINVAL);
	}
	suspendedallmutators = 0;

	for (ulwp = self->ul_forw; ulwp != self; ulwp = ulwp->ul_forw) {
		mutex_t *mp = ulwp_mutex(ulwp, udp);
		lmutex_lock(mp);
		if (ulwp->ul_stop & TSTP_MUTATOR) {
			ulwp->ul_stop &= ~TSTP_MUTATOR;
			ulwp_broadcast(ulwp);
			if (!ulwp->ul_stop)
				force_continue(ulwp);
		}
		lmutex_unlock(mp);
	}

	lmutex_unlock(&udp->link_lock);
	fork_lock_exit();
	return (0);
}

/*
 * Resume a suspended mutator.
 */
#pragma weak _thr_continue_mutator = thr_continue_mutator
int
thr_continue_mutator(thread_t tid)
{
	return (_thrp_continue(tid, TSTP_MUTATOR));
}
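
/*
 * Illustrative sketch (not compiled): the garbage-collection protocol
 * these consolidation-private interfaces were designed for.  Each
 * heap-mutating thread registers itself, and the collector stops the
 * world around a scan.  This is one plausible ordering; modern JVMs no
 * longer use these interfaces.
 */
#if 0
static void
mutator_register(void)
{
	(void) thr_setmutator(0, 1);	/* tid 0 means the caller */
}

static void
collect_sketch(void (*scan_heap)(void))
{
	thr_mutators_barrier(1);	/* keep new mutators out */
	(void) thr_suspend_allmutators();
	scan_heap();			/* the world is stopped */
	(void) thr_continue_allmutators();
	thr_mutators_barrier(0);	/* release waiting non-mutators */
}
#endif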

#pragma weak _thr_wait_mutator = thr_wait_mutator
int
thr_wait_mutator(thread_t tid, int dontwait)
{
	uberdata_t *udp = curthread->ul_uberdata;
	ulwp_t *ulwp;
	int cancel_state;
	int error = 0;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
top:
	if ((ulwp = find_lwp(tid)) == NULL) {
		(void) pthread_setcancelstate(cancel_state, NULL);
		return (ESRCH);
	}

	if (!ulwp->ul_mutator)
		error = EINVAL;
	else if (dontwait) {
		if (!(ulwp->ul_stop & TSTP_MUTATOR))
			error = EWOULDBLOCK;
	} else if (!(ulwp->ul_stop & TSTP_MUTATOR)) {
		cond_t *cvp = ulwp_condvar(ulwp, udp);
		mutex_t *mp = ulwp_mutex(ulwp, udp);

		(void) cond_wait(cvp, mp);
		(void) lmutex_unlock(mp);
		goto top;
	}

	ulwp_unlock(ulwp, udp);
	(void) pthread_setcancelstate(cancel_state, NULL);
	return (error);
}

/* PROBE_SUPPORT begin */

void
thr_probe_setup(void *data)
{
	curthread->ul_tpdp = data;
}

static void *
_thread_probe_getfunc()
{
	return (curthread->ul_tpdp);
}

void * (*thr_probe_getfunc_addr)(void) = _thread_probe_getfunc;

/* ARGSUSED */
void
_resume(ulwp_t *ulwp, caddr_t sp, int dontsave)
{
	/* never called */
}

/* ARGSUSED */
void
_resume_ret(ulwp_t *oldlwp)
{
	/* never called */
}

/* PROBE_SUPPORT end */